/* iwl-tx.c */
/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};
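
/*
 * The table above follows the 802.11e UP-to-AC mapping: TIDs 1-2 use AC0,
 * TIDs 0 and 3 use AC1, TIDs 4-5 use AC2, and TIDs 6-7 use AC3; TIDs 8-15
 * get no FIFO, and the final entry covers non-QoS traffic. (Reading the
 * IWL_TX_FIFO_AC* constants as background/best-effort/video/voice is an
 * interpretation based on this mapping, not something this file states.)
 */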
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
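
/*
 * As the two writes above show, the HBUS_TARG_WRPTR value packs the TFD
 * index into the low byte and the Tx queue id starting at bit 8, i.e.
 * (write_ptr | (txq_id << 8)); since n_bd never exceeds
 * TFD_QUEUE_SIZE_MAX, the index always fits in those low bits.
 */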

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq_id: Index of the transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);

/**
 * iwl_cmd_queue_free - Deallocate the command DMA queue.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* De-alloc array of command/tx buffers; the command queue has one
	 * extra slot for the oversized scan command, hence "<=". */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, priv->hw_params.tfd_size *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
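
/*
 * For reference when reading the queue loops in this file: the wrap
 * helpers live in iwl-helpers.h and (roughly, relying on the power-of-two
 * n_bd enforced in iwl_queue_init) reduce to:
 *
 *	static inline int iwl_queue_inc_wrap(int index, int n_bd)
 *	{
 *		return ++index & (n_bd - 1);
 *	}
 *
 *	static inline int iwl_queue_dec_wrap(int index, int n_bd)
 *	{
 *		return --index & (n_bd - 1);
 *	}
 */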

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);
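
/*
 * Worked example for iwl_queue_space(): with n_bd = 256, n_window = 64,
 * read_ptr = 0 and write_ptr = 10, s starts at -10, gains n_window to
 * become 54, then loses the 2-entry reserve, so 52 slots are reported
 * free. Note the result is bounded by the window size, not by n_bd.
 */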

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since the scan command is very large; the system
	 * will not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	if (txq_id == IWL_CMD_QUEUE_NUM)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
			len += IWL_MAX_SCAN_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * Aggregation TX queues will get their ID when aggregation begins;
	 * they overwrite the setting done here. The command FIFO doesn't
	 * need an swq_id so don't set one to catch errors, all others can
	 * be set up to the identity mapping.
	 */
	if (txq_id != IWL_CMD_QUEUE_NUM)
		txq->swq_id = txq_id;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);

/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq)
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
		     txq_id++)
			if (txq_id == IWL_CMD_QUEUE_NUM)
				iwl_cmd_queue_free(priv);
			else
				iwl_tx_queue_free(priv, txq_id);

	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);

	/* free tx queue structure */
	iwl_free_txq_mem(priv);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);

/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
	iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}

/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);

/*
 * Handle building the REPLY_TX command.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DEFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int is_hcca)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
			  RTS_DEFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
					     info->control.sta);
	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up RTS and CTS flags for certain packets */
	switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
	case cpu_to_le16(IEEE80211_STYPE_AUTH):
	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
	case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
	case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
		if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
			tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
		}
		break;
	default:
		break;
	}

	/* Set up antennas */
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
				       IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}

/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	/* drop all non-injected data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	if (info->flags & IEEE80211_TX_CTL_INJECTED)
		sta_id = priv->hw_params.bcast_sta_id;
	else
		sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop_unlock;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
	    sta_priv->asleep) {
		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	txq_id = skb_get_queue_mapping(skb);
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (unlikely(tid >= MAX_TID_COUNT))
			goto drop_unlock;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
	}

	txq = &priv->txq[txq_id];
	swq_id = txq->swq_id;
	q = &txq->q;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		goto drop_unlock;

	if (ieee80211_is_data_qos(fc))
		priv->stations[sta_id].tid[tid].tfds_in_queue++;

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
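	/*
	 * The sequence value below packs the TFD index into the low byte
	 * and the Tx queue id starting at bit 8. (Inferred from the
	 * QUEUE_TO_SEQ/INDEX_TO_SEQ macro names; the exact masks live in
	 * the command header definitions, not in this file.)
	 */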
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
						  INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
	      sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	firstlen = len = (len + 3) & ~3;
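	/*
	 * Worked example of the round-up above: with a 26-byte QoS MAC
	 * header, len is sizeof(iwl_tx_cmd) + sizeof(iwl_cmd_header) + 26;
	 * if that total were, say, 218, (218 + 3) & ~3 yields 220, and the
	 * 2-byte difference is what TX_CMD_FLG_MH_PAD_MSK reports below.
	 */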
	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
	pci_unmap_len_set(out_meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
	      sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &out_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/* avoid atomic ops if it isn't an associated client */
	if (sta_priv && sta_priv->client)
		atomic_inc(&sta_priv->pending_frames);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len, ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space for Tx\n");
		if (iwl_within_ct_kill_margin(priv))
			iwl_tt_enter_ct_kill(priv);
		else {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			queue_work(priv->workqueue, &priv->restart);
		}
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
					    INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				  "%d bytes at %d[%d]:%d\n",
				  get_cmd_string(out_cmd->hdr.cmd),
				  out_cmd->hdr.cmd,
				  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				  q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			     "%d bytes at %d[%d]:%d\n",
			     get_cmd_string(out_cmd->hdr.cmd),
			     out_cmd->hdr.cmd,
			     le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			     q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, phys_addr);
	pci_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}
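
/*
 * For reference, get_cmd_index() (defined in the driver headers; a sketch,
 * not verbatim) picks the slot used above: a huge command takes the
 * dedicated slot past the normal window, everything else is the TFD index
 * modulo the window size:
 *
 *	static inline int get_cmd_index(struct iwl_queue *q, u32 index,
 *					int is_huge)
 *	{
 *		if (is_huge)
 *			return q->n_window;	// dedicated oversize slot
 *		return index & (q->n_window - 1);
 *	}
 */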

static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}

	ieee80211_tx_status_irqsafe(priv->hw, skb);
}

int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		iwl_tx_status(priv, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		 txq_id, sequence,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];

	pci_unmap_single(priv->pci_dev,
			 pci_unmap_addr(meta, mapping),
			 pci_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
		 __func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);

int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERR(priv, "ra = NULL\n");
		return -EINVAL;
	}

	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);

int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);

/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
					     struct iwl_ht_agg *agg,
					     struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
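	/*
	 * Worked example of the alignment above: if our Tx window starts at
	 * agg->start_idx = 7 but the BA's seq_ctl maps to TFD index 5, then
	 * sh = 2 and the shift drops the two bits for frames before our
	 * window, so bit 0 of "bitmap" lines up with start_idx.
	 */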
	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}

/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);

#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */