iwl-tx.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

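/*
 * 802.11e user-priority (TID) to EDCA Tx FIFO mapping: TIDs 0 and 3
 * (best effort) use AC1, TIDs 1 and 2 (background) use AC0, TIDs 4 and 5
 * (video) use AC2, and TIDs 6 and 7 (voice) use AC3.  TIDs 8-15 get no
 * FIFO; the 17th entry appears to be the driver's internal non-QoS TID,
 * which shares the voice FIFO.
 */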
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};

static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	dma_addr_t addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

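/*
 * Illustrative sketch, not part of the driver: how iwl_tfd_set_tb()
 * splits a 36-bit DMA address across one TFD entry.  The low 32 bits
 * go into tb->lo; the top 4 bits share tb->hi_n_len with the 12-bit
 * length (length in bits 15:4, address bits 35:32 in bits 3:0).  The
 * function name below is hypothetical.
 */
static inline u16 iwl_tfd_hi_n_len_example(dma_addr_t addr, u16 len)
{
	/* same double-shift idiom as iwl_tfd_set_tb(): a single >> 32
	 * would be undefined when dma_addr_t is only 32 bits wide */
	return (len << 4) | (((addr >> 16) >> 16) & 0xF);
}
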
/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				 pci_unmap_addr(&txq->cmd[index]->meta, mapping),
				 pci_unmap_len(&txq->cmd[index]->meta, len),
				 PCI_DMA_TODEVICE);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++) {
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);

		if (txq->txb) {
			dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
			txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
		}
	}
}

static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
					struct iwl_tfd *tfd,
					dma_addr_t addr, u16 len)
{
	u32 num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		/* restore this queue's parameters in nic hardware. */
		ret = iwl_grab_nic_access(priv);
		if (ret)
			return ret;
		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));
		iwl_release_nic_access(priv);

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_cmd) * q->n_window;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int i, len;

	if (q->n_bd == 0)
		return;

	len = sizeof(struct iwl_cmd) * q->n_window;
	len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2
 * empty entries in each circular buffer, to protect against confusing empty
 * and full queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low-mark and high-mark limits. If, after
 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue is
 * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free space
 * becomes > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);

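/*
 * Illustrative sketch, not part of the driver: for an idle queue with
 * n_window = 64 slots, read_ptr == write_ptr gives s = 0, the s <= 0
 * branch raises it to 64, and the 2-entry reserve then reports 62 free
 * slots, so an empty queue can never be mistaken for a full one.  The
 * function name is hypothetical.
 */
static inline int iwl_queue_space_example(void)
{
	struct iwl_queue q = {
		.n_bd = 256,
		.n_window = 64,
		.read_ptr = 0,
		.write_ptr = 0,
	};

	return iwl_queue_space(&q);	/* 62 */
}
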
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

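/*
 * Illustrative sketch, not part of the driver: the power-of-two
 * BUG_ON()s above exist because index wrapping can then be done with a
 * mask (the assumption here is that get_cmd_index() masks the index
 * with n_window - 1).  Masking only wraps correctly for powers of two:
 * with slots_num = 256, 255 & 0xff == 255 and 256 & 0xff == 0, whereas
 * slots_num = 48 would skip indexes instead of wrapping them.
 */
static inline u32 iwl_wrap_by_mask_example(u32 index, u32 slots_num)
{
	/* valid only when slots_num is a power of two */
	return index & (slots_num - 1);
}
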
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else
		txq->txb = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = pci_alloc_consistent(dev,
					 sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX,
					 &txq->q.dma_addr);
	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n",
			sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX);
		goto error;
	}
	txq->q.id = id;

	return 0;

error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
				struct iwl_tx_queue *txq)
{
	int ret;
	unsigned long flags;
	int txq_id = txq->q.id;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
static int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			     int slots_num, u32 txq_id)
{
	int i, len;
	int ret;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since the scan command is very large; the system
	 * will never run two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl_cmd);
	for (i = 0; i <= slots_num; i++) {
		if (i == slots_num) {
			if (txq_id == IWL_CMD_QUEUE_NUM)
				len += IWL_MAX_SCAN_SIZE;
			else
				continue;
		}

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwl_hw_tx_queue_init(priv, txq);

	return 0;
err:
	for (i = 0; i < slots_num; i++) {
		kfree(txq->cmd[i]);
		txq->cmd[i] = NULL;
	}

	if (txq_id == IWL_CMD_QUEUE_NUM) {
		kfree(txq->cmd[slots_num]);
		txq->cmd[slots_num] = NULL;
	}
	return -ENOMEM;
}

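/*
 * Illustrative sketch, not part of the driver: the per-slot buffer size
 * iwl_tx_queue_init() allocates above.  Only the one-past-the-end slot
 * of the command queue gets the super-sized scan buffer.  The function
 * name is hypothetical.
 */
static inline size_t iwl_cmd_slot_len_example(int i, int slots_num, u32 txq_id)
{
	size_t len = sizeof(struct iwl_cmd);

	if (i == slots_num && txq_id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;
	return len;
}
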
/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == IWL_CMD_QUEUE_NUM)
			iwl_cmd_queue_free(priv);
		else
			iwl_tx_queue_free(priv, txq_id);

	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);

/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}
	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (unlikely(ret)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		goto error_reset;
	}

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

error:
	iwl_hw_txq_ctx_free(priv);
error_reset:
	iwl_free_dma_ptr(priv, &priv->kw);
error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
error_bc_tbls:
	return ret;
}

/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);

/*
 * Handles building the REPLY_TX command.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int sta_id,
				  int is_hcca)
{
	u32 rate_flags = 0;
	int rate_idx;
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u8 rate_plcp;

	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
			IWL_RATE_COUNT - 1);

	rate_plcp = iwl_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
			RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	if (ieee80211_is_probe_resp(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;

	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
		rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
	}

	tx_cmd->rts_retry_limit = rts_retry_limit;
	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX("Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}

static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
{
	/* 0 - mgmt, 1 - ctl, 2 - data */
	int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	priv->tx_stats[idx].cnt++;
	priv->tx_stats[idx].bytes += len;
}

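/*
 * Illustrative sketch, not part of the driver: the frame-type field of
 * frame_control occupies bits 2-3 (IEEE80211_FCTL_FTYPE == 0x000c), so
 * shifting right by two maps management (0x0000) to 0, control (0x0004)
 * to 1 and data (0x0008) to 2, which indexes tx_stats[] directly.  The
 * function name is hypothetical.
 */
static inline int iwl_tx_stats_idx_example(void)
{
	u16 fc = IEEE80211_FTYPE_DATA;			/* 0x0008 */

	return (fc & IEEE80211_FCTL_FTYPE) >> 2;	/* 2 */
}
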
/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_tfd *tfd;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_cmd *out_cmd;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP("Dropping - RF KILL\n");
		goto drop_unlock;
	}

	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
	     IWL_INVALID_RATE) {
		IWL_ERR(priv, "ERROR: No TX rate available.\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX("Sending REASSOC frame\n");
#endif

	/* drop all data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    (priv->iw_mode != NL80211_IFTYPE_MONITOR ||
	    !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop;
	}

	IWL_DEBUG_TX("station Id %d\n", sta_id);

	swq_id = skb_get_queue_mapping(skb);
	txq_id = swq_id;
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				__constant_cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
	}

	txq = &priv->txq[txq_id];
	q = &txq->q;
	txq->swq_id = swq_id;

	spin_lock_irqsave(&priv->lock, flags);

	/* Set up first empty TFD within this queue's circular TFD buffer */
	tfd = &txq->tfds[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    out_cmd, sizeof(struct iwl_cmd),
				    PCI_DMA_TODEVICE);
	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	txcmd_phys += offsetof(struct iwl_cmd, hdr);
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
	}

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);
	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			ieee80211_stop_queue(priv->hw, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
drop:
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);

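/*
 * Illustrative sketch, not part of the driver: iwl_tx_skb() stores the
 * Tx queue id and TFD index in the command sequence field via
 * QUEUE_TO_SEQ()/INDEX_TO_SEQ(); iwl_tx_cmd_complete() recovers them
 * with SEQ_TO_QUEUE()/SEQ_TO_INDEX().  This round trip is what lets a
 * Tx response locate its frame.  The function name is hypothetical.
 */
static inline void iwl_seq_roundtrip_example(int txq_id, int index)
{
	u16 seq = (u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(index));

	WARN_ON(SEQ_TO_QUEUE(seq) != txq_id);
	WARN_ON(SEQ_TO_INDEX(seq) != index);
}
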
/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd *tfd;
	struct iwl_cmd *out_cmd;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len, ret;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->tfds[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = (idx == TFD_CMD_SLOTS) ?
			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);

	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
				   len, PCI_DMA_TODEVICE);
	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
	pci_unmap_len_set(&out_cmd->meta, len, len);
	phys_addr += offsetof(struct iwl_cmd, hdr);

	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
				  "%d bytes at %d[%d]:%d\n",
				  get_cmd_string(out_cmd->hdr.cmd),
				  out_cmd->hdr.cmd,
				  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				  q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
			     "%d bytes at %d[%d]:%d\n",
			     get_cmd_string(out_cmd->hdr.cmd),
			     out_cmd->hdr.cmd,
			     le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			     q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	/* Set up entry in queue's byte count circular buffer */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}

int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		iwl_hw_txq_free_tfd(priv, txq);
		nfreed++;
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	pci_unmap_single(priv->pci_dev,
			 pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
			 pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
			 PCI_DMA_TODEVICE);

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}
	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_cmd *cmd;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		 txq_id, sequence,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_dump(priv, IWL_DL_INFO, rxb, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];

	/* Input error checking is done when commands are added to queue. */
	if (cmd->meta.flags & CMD_WANT_SKB) {
		cmd->meta.source->u.skb = rxb->skb;
		rxb->skb = NULL;
	} else if (cmd->meta.u.callback &&
		   !cmd->meta.u.callback(priv, cmd, rxb->skb))
		rxb->skb = NULL;

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(cmd->meta.flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

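/*
 * Illustrative sketch, not part of the driver: a hypothetical async
 * command callback.  Per iwl_tx_cmd_complete() above, returning 0
 * clears rxb->skb so the Rx path will not free the response skb (the
 * callback has consumed it); returning 1 leaves it for the Rx path to
 * free.  The signature is inferred from the call site.
 */
static inline int iwl_example_hcmd_callback(struct iwl_priv *priv,
					    struct iwl_cmd *cmd,
					    struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);	/* consume the response ourselves */
	return 0;		/* tell the completion path we kept it */
}
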
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
		 __func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1)
		return -ENXIO;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		IWL_ERR(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
	} else {
		IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);

int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int ret, write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERR(priv, "ra = NULL\n");
		return -EINVAL;
	}

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION)
		return -ENXIO;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT("HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);

int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);

/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
					     struct iwl_ht_agg *agg,
					     struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbd: something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY("more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count ; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags = IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}

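/*
 * Worked example, illustrative only: if the aggregation window began at
 * Tx-queue index 200 (agg->start_idx) and the BA's seq_ctl decodes to
 * index 196, sh = 4, so bit 0 of the shifted BA bitmap lines up with
 * the first frame of the driver's window.  A negative sh means the
 * 8-bit indexes wrapped, hence the += 0x100 correction above.  The
 * function name is hypothetical.
 */
static inline int iwl_ba_shift_example(void)
{
	int start_idx = 200;	/* first frame in the Tx window */
	int ba_idx = 196;	/* SEQ_TO_INDEX(seq_ctl >> 4) */
	int sh = start_idx - ba_idx;

	if (sh < 0)		/* indexes wrapped past 0xff */
		sh += 0x100;
	return sh;		/* 4 */
}
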
/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			ieee80211_wake_queue(priv->hw, txq->swq_id);

		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);

#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}
	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */