iwl-trans-pcie-tx.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-op-mode.h"
#include "iwl-trans-pcie-int.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*
 * mac80211 queues, ACs, hardware queues, FIFOs.
 *
 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
 *
 * Mac80211 uses the following numbers, which we get from it
 * by way of skb_get_queue_mapping(skb):
 *
 *      VO      0
 *      VI      1
 *      BE      2
 *      BK      3
 *
 * Regular (not A-MPDU) frames are put into hardware queues corresponding
 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
 * own queue per aggregation session (RA/TID combination), such queues are
 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
 * order to map frames to the right queue, we also need an AC->hw queue
 * mapping. This is implemented here.
 *
 * Due to the way hw queues are set up (by the hw specific code), the AC->hw
 * queue mapping is the identity mapping.
 */
static const u8 tid_to_ac[] = {
        IEEE80211_AC_BE,
        IEEE80211_AC_BK,
        IEEE80211_AC_BK,
        IEEE80211_AC_BE,
        IEEE80211_AC_VI,
        IEEE80211_AC_VI,
        IEEE80211_AC_VO,
        IEEE80211_AC_VO
};
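
/*
 * The table above follows the standard IEEE 802.1D/802.11e user-priority
 * to access-category mapping: TIDs 0 and 3 are best effort, 1 and 2 are
 * background, 4 and 5 are video, and 6 and 7 are voice.
 */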

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
                                       struct iwl_tx_queue *txq,
                                       u16 byte_cnt)
{
        struct iwlagn_scd_bc_tbl *scd_bc_tbl;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int write_ptr = txq->q.write_ptr;
        int txq_id = txq->q.id;
        u8 sec_ctl = 0;
        u8 sta_id = 0;
        u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
        __le16 bc_ent;
        struct iwl_tx_cmd *tx_cmd =
                (struct iwl_tx_cmd *)txq->cmd[txq->q.write_ptr]->payload;

        scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

        WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

        sta_id = tx_cmd->sta_id;
        sec_ctl = tx_cmd->sec_ctl;

        switch (sec_ctl & TX_CMD_SEC_MSK) {
        case TX_CMD_SEC_CCM:
                len += CCMP_MIC_LEN;
                break;
        case TX_CMD_SEC_TKIP:
                len += TKIP_ICV_LEN;
                break;
        case TX_CMD_SEC_WEP:
                len += WEP_IV_LEN + WEP_ICV_LEN;
                break;
        }

        bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

        scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

        if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
                scd_bc_tbl[txq_id].
                        tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
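
/*
 * Each byte-count entry packs the frame length (including CRC, delimiter
 * and any crypto overhead added above) into bits 0-11 and the station id
 * into bits 12-15. The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored
 * past TFD_QUEUE_SIZE_MAX so the scheduler can read a window that wraps
 * around the end of the table without special-casing the wrap.
 */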

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
        u32 reg = 0;
        int txq_id = txq->q.id;

        if (txq->need_update == 0)
                return;

        if (cfg(trans)->base_params->shadow_reg_enable) {
                /* shadow register enabled */
                iwl_write32(trans, HBUS_TARG_WRPTR,
                            txq->q.write_ptr | (txq_id << 8));
        } else {
                /* if we're trying to save power */
                if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
                        /* wake up nic if it's powered down ...
                         * uCode will wake up, and interrupt us again, so next
                         * time we'll skip this part. */
                        reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

                        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                                IWL_DEBUG_INFO(trans,
                                        "Tx queue %d requesting wakeup,"
                                        " GP1 = 0x%x\n", txq_id, reg);
                                iwl_set_bit(trans, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                                return;
                        }

                        iwl_write_direct32(trans, HBUS_TARG_WRPTR,
                                           txq->q.write_ptr | (txq_id << 8));

                /*
                 * else not in power-save mode,
                 * uCode will never sleep when we're
                 * trying to tx (during RFKILL, we're not trying to tx).
                 */
                } else
                        iwl_write32(trans, HBUS_TARG_WRPTR,
                                    txq->q.write_ptr | (txq_id << 8));
        }

        txq->need_update = 0;
}
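
/*
 * In all three paths above, the value written to HBUS_TARG_WRPTR packs
 * the write index into the low 8 bits and the queue id into bits 8 and
 * up; the only difference is whether the device first has to be woken
 * from power save (the CSR_UCODE_DRV_GP1 / MAC_ACCESS_REQ dance) or can
 * be written directly (shadow registers enabled, or not sleeping).
 */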

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];
        dma_addr_t addr = get_unaligned_le32(&tb->lo);

        if (sizeof(dma_addr_t) > sizeof(u32))
                addr |=
                ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

        return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];

        return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
                                  dma_addr_t addr, u16 len)
{
        struct iwl_tfd_tb *tb = &tfd->tbs[idx];
        u16 hi_n_len = len << 4;

        put_unaligned_le32(addr, &tb->lo);
        if (sizeof(dma_addr_t) > sizeof(u32))
                hi_n_len |= ((addr >> 16) >> 16) & 0xF;

        tb->hi_n_len = cpu_to_le16(hi_n_len);

        tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
        return tfd->num_tbs & 0x1f;
}
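
/*
 * A Tx buffer (TB) descriptor holds a 36-bit address and a 12-bit length:
 * tb->lo carries addr[31:0], while tb->hi_n_len carries addr[35:32] in its
 * low nibble and the length in its upper 12 bits - hence the "<< 4" and
 * ">> 4" in the accessors above. The odd "(x << 16) << 16" split is a
 * 32-bit shift written in two halves so it stays well-defined (and
 * warning-free) when dma_addr_t is only 32 bits wide.
 */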

static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
                             struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
        int i;
        int num_tbs;

        /* Sanity check on number of chunks */
        num_tbs = iwl_tfd_get_num_tbs(tfd);

        if (num_tbs >= IWL_NUM_OF_TBS) {
                IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
                /* @todo issue fatal error, it is quite a serious situation */
                return;
        }

        /* Unmap tx_cmd */
        if (num_tbs)
                dma_unmap_single(trans->dev,
                                dma_unmap_addr(meta, mapping),
                                dma_unmap_len(meta, len),
                                DMA_BIDIRECTIONAL);

        /* Unmap chunks, if any. */
        for (i = 1; i < num_tbs; i++)
                dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
                                iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
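
/*
 * Note the asymmetry above: TB 0 always points at the (bidirectionally
 * mapped) command/metadata buffer whose mapping is recorded in the meta
 * structure, so it is unmapped via dma_unmap_addr/dma_unmap_len, while
 * the data chunks from TB 1 onward carry their address and length in the
 * TFD itself.
 */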

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
                         int index, enum dma_data_direction dma_dir)
{
        struct iwl_tfd *tfd_tmp = txq->tfds;

        lockdep_assert_held(&txq->lock);

        iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);

        /* free SKB */
        if (txq->skbs) {
                struct sk_buff *skb;

                skb = txq->skbs[index];

                /* Can be called from irqs-disabled context
                 * If skb is not NULL, it means that the whole queue is being
                 * freed and that the queue is not empty - free the skb
                 */
                if (skb) {
                        iwl_op_mode_free_skb(trans->op_mode, skb);
                        txq->skbs[index] = NULL;
                }
        }
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
                                 struct iwl_tx_queue *txq,
                                 dma_addr_t addr, u16 len,
                                 u8 reset)
{
        struct iwl_queue *q;
        struct iwl_tfd *tfd, *tfd_tmp;
        u32 num_tbs;

        q = &txq->q;
        tfd_tmp = txq->tfds;
        tfd = &tfd_tmp[q->write_ptr];

        if (reset)
                memset(tfd, 0, sizeof(*tfd));

        num_tbs = iwl_tfd_get_num_tbs(tfd);

        /* Each TFD can point to a maximum of 20 Tx buffers */
        if (num_tbs >= IWL_NUM_OF_TBS) {
                IWL_ERR(trans, "Error can not send more than %d chunks\n",
                        IWL_NUM_OF_TBS);
                return -EINVAL;
        }

        if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
                return -EINVAL;

        if (unlikely(addr & ~IWL_TX_DMA_MASK))
                IWL_ERR(trans, "Unaligned address = %llx\n",
                        (unsigned long long)addr);

        iwl_tfd_set_tb(tfd, num_tbs, addr, len);

        return 0;
}
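
/*
 * The DMA_BIT_MASK(36) check above matches the 4 spare address bits in
 * hi_n_len: an address that does not fit in 36 bits simply cannot be
 * represented in a TB, so it is rejected rather than silently truncated.
 */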

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on 'tx done IRQ'), if free space becomes > high
 * mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
        int s = q->read_ptr - q->write_ptr;

        if (q->read_ptr > q->write_ptr)
                s -= q->n_bd;

        if (s <= 0)
                s += q->n_window;
        /* keep some reserve to not confuse empty and full situations */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
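
/*
 * Worked example with hypothetical numbers: for n_bd = 256, n_window = 64,
 * read_ptr = 10 and write_ptr = 20, s starts at 10 - 20 = -10; since
 * read_ptr <= write_ptr the n_bd correction is skipped, the s <= 0 branch
 * adds n_window to give 54, and the 2-entry reserve leaves 52 free slots.
 */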

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
        q->n_bd = count;
        q->n_window = slots_num;
        q->id = id;

        /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
         * and iwl_queue_dec_wrap are broken. */
        if (WARN_ON(!is_power_of_2(count)))
                return -EINVAL;

        /* slots_num must be power-of-two size, otherwise
         * get_cmd_index is broken. */
        if (WARN_ON(!is_power_of_2(slots_num)))
                return -EINVAL;

        q->low_mark = q->n_window / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;

        q->high_mark = q->n_window / 8;
        if (q->high_mark < 2)
                q->high_mark = 2;

        q->write_ptr = q->read_ptr = 0;

        return 0;
}
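
/*
 * The power-of-two requirements above allow the wrap and index helpers
 * (iwl_queue_inc_wrap/iwl_queue_dec_wrap and get_cmd_index) to reduce an
 * index with a cheap "& (size - 1)" mask instead of a modulo; with a
 * non-power-of-two size that mask would yield wrong indexes.
 */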

static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
                                          struct iwl_tx_queue *txq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
        int txq_id = txq->q.id;
        int read_ptr = txq->q.read_ptr;
        u8 sta_id = 0;
        __le16 bc_ent;
        struct iwl_tx_cmd *tx_cmd =
                (struct iwl_tx_cmd *)txq->cmd[txq->q.read_ptr]->payload;

        WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

        if (txq_id != trans_pcie->cmd_queue)
                sta_id = tx_cmd->sta_id;

        bc_ent = cpu_to_le16(1 | (sta_id << 12));

        scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

        if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
                scd_bc_tbl[txq_id].
                        tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
                                       u16 txq_id)
{
        u32 tbl_dw_addr;
        u32 tbl_dw;
        u16 scd_q2ratid;

        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

        tbl_dw_addr = trans_pcie->scd_base_addr +
                        SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

        tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);

        if (txq_id & 0x1)
                tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
        else
                tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

        iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);

        return 0;
}
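
/*
 * The scheduler's translation table stores two queues' RA/TID mappings
 * per 32-bit word: odd queue ids occupy the high half-word and even ids
 * the low half-word, which is why the entry is read, modified and written
 * back rather than written outright.
 */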

static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
        /* Simply stop the queue, but don't change any configuration;
         * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
        iwl_write_prph(trans,
                SCD_QUEUE_STATUS_BITS(txq_id),
                (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
                           int txq_id, u32 index)
{
        IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
        iwl_write_direct32(trans, HBUS_TARG_WRPTR,
                        (index & 0xff) | (txq_id << 8));
        iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
                                   struct iwl_tx_queue *txq,
                                   int tx_fifo_id, int scd_retry)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int txq_id = txq->q.id;
        int active =
                test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

        iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
                        (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                        (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
                        (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
                        SCD_QUEUE_STTS_REG_MSK);

        txq->sched_retry = scd_retry;

        if (active)
                IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
                        scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
        else
                IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
                        scd_retry ? "BA" : "AC/CMD", txq_id);
}

static inline int get_ac_from_tid(u16 tid)
{
        if (likely(tid < ARRAY_SIZE(tid_to_ac)))
                return tid_to_ac[tid];

        /* no support for TIDs 8-15 yet */
        return -EINVAL;
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
                                    u8 ctx, u16 tid)
{
        const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];

        if (likely(tid < ARRAY_SIZE(tid_to_ac)))
                return ac_to_fifo[tid_to_ac[tid]];

        /* no support for TIDs 8-15 yet */
        return -EINVAL;
}

static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
{
        if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
                return false;
        return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
                        hw_params(trans).num_ampdu_queues);
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
                                 enum iwl_rxon_context_id ctx, int sta_id,
                                 int tid, int frame_limit, u16 ssn)
{
        int tx_fifo, txq_id;
        u16 ra_tid;
        unsigned long flags;

        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (WARN_ON(sta_id == IWL_INVALID_STATION))
                return;
        if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
                return;

        tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
        if (WARN_ON(tx_fifo < 0)) {
                IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
                return;
        }

        txq_id = trans_pcie->agg_txq[sta_id][tid];
        if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
                IWL_ERR(trans,
                        "queue number out of range: %d, must be %d to %d\n",
                        txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
                        IWLAGN_FIRST_AMPDU_QUEUE +
                        hw_params(trans).num_ampdu_queues - 1);
                return;
        }

        ra_tid = BUILD_RAxTID(sta_id, tid);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);

        /* Stop this Tx queue before configuring it */
        iwlagn_tx_queue_stop_scheduler(trans, txq_id);

        /* Map receiver-address / traffic-ID to this queue */
        iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

        /* Set this queue as a chain-building queue */
        iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));

        /* enable aggregations for the queue */
        iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));

        /* Place first TFD at index corresponding to start sequence number.
         * Assumes that ssn_idx is valid (!= 0xFFF) */
        trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
        trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
        iwl_trans_set_wr_ptrs(trans, txq_id, ssn);

        /* Set up Tx window size and frame limit for this queue */
        iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
                        SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
                        sizeof(u32),
                        ((frame_limit <<
                        SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                        SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                        ((frame_limit <<
                        SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                        SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

        iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));

        /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
        iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
                                        tx_fifo, 1);

        trans_pcie->txq[txq_id].sta_id = sta_id;
        trans_pcie->txq[txq_id].tid = tid;

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
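
/*
 * The "ssn & 0xff" above relies on the Tx queue being 256 TFDs long
 * (TFD_QUEUE_SIZE_MAX): the low byte of the start sequence number is then
 * exactly the TFD index at which the first aggregated frame must land, so
 * that hardware and driver agree on the position of the BA window.
 */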

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int txq_id;

        for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
                if (!test_and_set_bit(txq_id,
                                        &trans_pcie->txq_ctx_active_msk))
                        return txq_id;
        return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
                                int sta_id, int tid)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int txq_id;

        txq_id = iwlagn_txq_ctx_activate_free(trans);
        if (txq_id == -1) {
                IWL_ERR(trans, "No free aggregation queue available\n");
                return -ENXIO;
        }

        trans_pcie->agg_txq[sta_id][tid] = txq_id;
        iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

        return 0;
}

int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u8 txq_id = trans_pcie->agg_txq[sta_id][tid];

        if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
                IWL_ERR(trans,
                        "queue number out of range: %d, must be %d to %d\n",
                        txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
                        IWLAGN_FIRST_AMPDU_QUEUE +
                        hw_params(trans).num_ampdu_queues - 1);
                return -EINVAL;
        }

        iwlagn_tx_queue_stop_scheduler(trans, txq_id);

        iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));

        trans_pcie->agg_txq[sta_id][tid] = 0;
        trans_pcie->txq[txq_id].q.read_ptr = 0;
        trans_pcie->txq[txq_id].q.write_ptr = 0;
        /* supposes that ssn_idx is valid (!= 0xFFF) */
        iwl_trans_set_wr_ptrs(trans, txq_id, 0);

        iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
        iwl_txq_ctx_deactivate(trans_pcie, txq_id);
        iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);

        return 0;
}
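
/*
 * Teardown mirrors iwl_trans_pcie_tx_agg_setup in reverse: stop the
 * scheduler for the queue, take it out of the aggregation set, reset both
 * pointers to 0, mask its interrupt and finally mark the queue inactive.
 */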

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation failed.
 * On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        dma_addr_t phys_addr;
        u32 idx;
        u16 copy_size, cmd_size;
        bool had_nocopy = false;
        int i;
        u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
        int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
        int trace_idx;
#endif

        if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
                IWL_WARN(trans, "fw recovery, no hcmd send\n");
                return -EIO;
        }

        copy_size = sizeof(out_cmd->hdr);
        cmd_size = sizeof(out_cmd->hdr);

        /* need one for the header if the first is NOCOPY */
        BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
                        had_nocopy = true;
                } else {
                        /* NOCOPY must not be followed by normal! */
                        if (WARN_ON(had_nocopy))
                                return -EINVAL;
                        copy_size += cmd->len[i];
                }
                cmd_size += cmd->len[i];
        }

        /*
         * If any of the command structures end up being larger than
         * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
         * allocated into separate TFDs, then we will need to
         * increase the size of the buffers.
         */
        if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
                return -EINVAL;

        spin_lock_bh(&txq->lock);

        if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
                spin_unlock_bh(&txq->lock);

                IWL_ERR(trans, "No space in command queue\n");
                iwl_op_mode_cmd_queue_full(trans->op_mode);
                return -ENOSPC;
        }

        idx = get_cmd_index(q, q->write_ptr);
        out_cmd = txq->cmd[idx];
        out_meta = &txq->meta[idx];

        memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
        if (cmd->flags & CMD_WANT_SKB)
                out_meta->source = cmd;

        /* set up the header */

        out_cmd->hdr.cmd = cmd->id;
        out_cmd->hdr.flags = 0;
        out_cmd->hdr.sequence =
                cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
                                         INDEX_TO_SEQ(q->write_ptr));

        /* and copy the data that needs to be copied */

        cmd_dest = out_cmd->payload;
        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
                        break;
                memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
                cmd_dest += cmd->len[i];
        }

        IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
                        "%d bytes at %d[%d]:%d\n",
                        get_cmd_string(out_cmd->hdr.cmd),
                        out_cmd->hdr.cmd,
                        le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
                        q->write_ptr, idx, trans_pcie->cmd_queue);

        phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
                                DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
                idx = -ENOMEM;
                goto out;
        }

        dma_unmap_addr_set(out_meta, mapping, phys_addr);
        dma_unmap_len_set(out_meta, len, copy_size);

        iwlagn_txq_attach_buf_to_tfd(trans, txq,
                                        phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        trace_bufs[0] = &out_cmd->hdr;
        trace_lens[0] = copy_size;
        trace_idx = 1;
#endif

        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
                        continue;
                phys_addr = dma_map_single(trans->dev,
                                           (void *)cmd->data[i],
                                           cmd->len[i], DMA_BIDIRECTIONAL);
                if (dma_mapping_error(trans->dev, phys_addr)) {
                        iwlagn_unmap_tfd(trans, out_meta,
                                         &txq->tfds[q->write_ptr],
                                         DMA_BIDIRECTIONAL);
                        idx = -ENOMEM;
                        goto out;
                }

                iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
                                             cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
                trace_bufs[trace_idx] = cmd->data[i];
                trace_lens[trace_idx] = cmd->len[i];
                trace_idx++;
#endif
        }

        out_meta->flags = cmd->flags;

        txq->need_update = 1;

        /* check that tracing gets all possible blocks */
        BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
        trace_iwlwifi_dev_hcmd(trans->dev, cmd->flags,
                               trace_bufs[0], trace_lens[0],
                               trace_bufs[1], trace_lens[1],
                               trace_bufs[2], trace_lens[2]);
#endif

        /* Increment and update queue's write index */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_txq_update_write_ptr(trans, txq);

 out:
        spin_unlock_bh(&txq->lock);
        return idx;
}
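
/*
 * The sequence field built in iwl_enqueue_hcmd doubles as routing
 * information: QUEUE_TO_SEQ/INDEX_TO_SEQ encode where the command sits,
 * and the firmware echoes the field back in its response so that
 * iwl_tx_cmd_complete below can recover the queue and index with
 * SEQ_TO_QUEUE/SEQ_TO_INDEX.
 */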

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
                                   int idx)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        int nfreed = 0;

        lockdep_assert_held(&txq->lock);

        if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
                IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
                          "index %d is out of range [0-%d] %d %d.\n", __func__,
                          txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
                return;
        }

        for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                if (nfreed++ > 0) {
                        IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
                                        q->write_ptr, q->read_ptr);
                        iwl_op_mode_nic_error(trans->op_mode);
                }
        }
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
                         int handler_status)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int index = SEQ_TO_INDEX(sequence);
        int cmd_index;
        struct iwl_device_cmd *cmd;
        struct iwl_cmd_meta *meta;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];

        /* If a Tx command is being handled and it isn't in the actual
         * command queue then a command routing bug has been introduced
         * in the queue management code. */
        if (WARN(txq_id != trans_pcie->cmd_queue,
                 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
                 txq_id, trans_pcie->cmd_queue, sequence,
                 trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
                 trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
                iwl_print_hex_error(trans, pkt, 32);
                return;
        }

        spin_lock(&txq->lock);

        cmd_index = get_cmd_index(&txq->q, index);
        cmd = txq->cmd[cmd_index];
        meta = &txq->meta[cmd_index];

        txq->time_stamp = jiffies;

        iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
                         DMA_BIDIRECTIONAL);

        /* Input error checking is done when commands are added to queue. */
        if (meta->flags & CMD_WANT_SKB) {
                struct page *p = rxb_steal_page(rxb);

                meta->source->resp_pkt = pkt;
                meta->source->_rx_page_addr = (unsigned long)page_address(p);
                meta->source->_rx_page_order = hw_params(trans).rx_page_order;
                meta->source->handler_status = handler_status;
        }

        iwl_hcmd_queue_reclaim(trans, txq_id, index);

        if (!(meta->flags & CMD_ASYNC)) {
                if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
                        IWL_WARN(trans,
                                 "HCMD_ACTIVE already clear for command %s\n",
                                 get_cmd_string(cmd->hdr.cmd));
                }
                clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
                IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
                               get_cmd_string(cmd->hdr.cmd));
                wake_up(&trans->shrd->wait_command_queue);
        }

        meta->flags = 0;

        spin_unlock(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
        int ret;

        /* An asynchronous command cannot expect an SKB to be set. */
        if (WARN_ON(cmd->flags & CMD_WANT_SKB))
                return -EINVAL;

        ret = iwl_enqueue_hcmd(trans, cmd);
        if (ret < 0) {
                IWL_ERR(trans,
                        "Error sending %s: enqueue_hcmd failed: %d\n",
                          get_cmd_string(cmd->id), ret);
                return ret;
        }
        return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int cmd_idx;
        int ret;

        IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
                        get_cmd_string(cmd->id));

        if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
                IWL_ERR(trans, "Command %s failed: FW Error\n",
                               get_cmd_string(cmd->id));
                return -EIO;
        }

        if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
                                     &trans->shrd->status))) {
                IWL_ERR(trans, "Command %s: a command is already active!\n",
                        get_cmd_string(cmd->id));
                return -EIO;
        }

        IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
                        get_cmd_string(cmd->id));

        cmd_idx = iwl_enqueue_hcmd(trans, cmd);
        if (cmd_idx < 0) {
                ret = cmd_idx;
                clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
                IWL_ERR(trans,
                        "Error sending %s: enqueue_hcmd failed: %d\n",
                          get_cmd_string(cmd->id), ret);
                return ret;
        }

        ret = wait_event_timeout(trans->shrd->wait_command_queue,
                        !test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
                        HOST_COMPLETE_TIMEOUT);
        if (!ret) {
                if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
                        struct iwl_tx_queue *txq =
                                &trans_pcie->txq[trans_pcie->cmd_queue];
                        struct iwl_queue *q = &txq->q;

                        IWL_ERR(trans,
                                "Error sending %s: time out after %dms.\n",
                                get_cmd_string(cmd->id),
                                jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

                        IWL_ERR(trans,
                                "Current CMD queue read_ptr %d write_ptr %d\n",
                                q->read_ptr, q->write_ptr);

                        clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
                        IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command "
                                       "%s\n", get_cmd_string(cmd->id));
                        ret = -ETIMEDOUT;
                        goto cancel;
                }
        }

        if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
                IWL_ERR(trans, "Error: Response NULL in '%s'\n",
                          get_cmd_string(cmd->id));
                ret = -EIO;
                goto cancel;
        }

        return 0;

cancel:
        if (cmd->flags & CMD_WANT_SKB) {
                /*
                 * Cancel the CMD_WANT_SKB flag for the cmd in the
                 * TX cmd queue. Otherwise in case the cmd comes
                 * in later, it will possibly set an invalid
                 * address (cmd->meta.source).
                 */
                trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &=
                                                        ~CMD_WANT_SKB;
        }

        if (cmd->resp_pkt) {
                iwl_free_resp(cmd);
                cmd->resp_pkt = NULL;
        }

        return ret;
}
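
/*
 * The synchronous path is a simple handshake around STATUS_HCMD_ACTIVE:
 * it is set here before the command is enqueued, cleared by
 * iwl_tx_cmd_complete() when the response arrives (which also wakes
 * wait_command_queue), and cleared with a timeout error here if no
 * response shows up within HOST_COMPLETE_TIMEOUT.
 */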

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
        if (cmd->flags & CMD_ASYNC)
                return iwl_send_cmd_async(trans, cmd);

        return iwl_send_cmd_sync(trans, cmd);
}

/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
                         struct sk_buff_head *skbs)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        int last_to_free;
        int freed = 0;

        /* This function is not meant to release cmd queue */
        if (WARN_ON(txq_id == trans_pcie->cmd_queue))
                return 0;

        lockdep_assert_held(&txq->lock);

        /* Since we free until index _not_ inclusive, the one before index is
         * the last we will free. This one must be used. */
        last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

        if ((index >= q->n_bd) ||
           (iwl_queue_used(q, last_to_free) == 0)) {
                IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
                          "last_to_free %d is out of range [0-%d] %d %d.\n",
                          __func__, txq_id, last_to_free, q->n_bd,
                          q->write_ptr, q->read_ptr);
                return 0;
        }

        if (WARN_ON(!skb_queue_empty(skbs)))
                return 0;

        for (;
             q->read_ptr != index;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
                        continue;

                __skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

                txq->skbs[txq->q.read_ptr] = NULL;

                iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

                iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
                freed++;
        }
        return freed;
}