iwl-tx.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
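
/*
 * Map each 802.11 QoS TID to one of the four EDCA Tx FIFOs. TIDs 0-7
 * follow the usual 802.11e user-priority to access-category mapping;
 * TIDs 8-15 carry no EDCA mapping and get IWL_TX_FIFO_NONE. The extra
 * trailing entry presumably covers non-QoS traffic on the highest-priority
 * FIFO (editorial note, not from the original source).
 */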
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};
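
/**
 * iwl_alloc_dma_ptr - Allocate a coherent DMA buffer, record address and size
 *
 * Fills @ptr with the CPU address, bus address and size of a fresh
 * dma_alloc_coherent() allocation; returns 0 on success, -ENOMEM otherwise.
 */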
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
				       GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}
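
/**
 * iwl_free_dma_ptr - Free a buffer obtained from iwl_alloc_dma_ptr
 *
 * The descriptor is zeroed afterwards, so calling this twice (or on a
 * never-allocated @ptr) is a harmless no-op.
 */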
static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return;
		}

		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
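
/**
 * iwl_free_tfds_in_queue - Credit back TFDs reclaimed for a station/TID
 *
 * Decrements the driver's count of TFDs still queued for this <sta, tid>
 * pair, clamping at zero (with an error message) if the firmware reports
 * more frames freed than the driver thought were in flight.
 */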
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
			    int sta_id, int tid, int freed)
{
	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
	else {
		IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
			priv->stations[sta_id].tid[tid].tfds_in_queue,
			freed);
		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
	}
}
EXPORT_SYMBOL(iwl_free_tfds_in_queue);

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct device *dev = &priv->pci_dev->dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct device *dev = &priv->pci_dev->dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
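
/**
 * iwl_queue_space - Return number of slots currently available in queue @q
 *
 * Example (editorial illustration): with n_window = 64, read_ptr = 10 and
 * write_ptr = 60, s = 10 - 60 = -50, then s += 64 gives 14 free slots,
 * and the 2-entry reserve brings the result down to 12.
 */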
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct device *dev = &priv->pci_dev->dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since the scan command is very large; the system
	 * will not have two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	if (txq_id == IWL_CMD_QUEUE_NUM)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
			len = IWL_MAX_CMD_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * Aggregation TX queues will get their ID when aggregation begins;
	 * they overwrite the setting done here. The command FIFO doesn't
	 * need an swq_id so don't set one to catch errors, all others can
	 * be set up to the identity mapping.
	 */
	if (txq_id != IWL_CMD_QUEUE_NUM)
		txq->swq_id = txq_id;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);
/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
		     txq_id++)
			if (txq_id == IWL_CMD_QUEUE_NUM)
				iwl_cmd_queue_free(priv);
			else
				iwl_tx_queue_free(priv, txq_id);
	}
	iwl_free_dma_ptr(priv, &priv->kw);

	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);

	/* free tx queue structure */
	iwl_free_txq_mem(priv);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);

/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}
	/* allocate tx queue structure */
	ret = iwl_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
	iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
	return ret;
}

/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	priv->cfg->ops->lib->txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Deallocate memory for all Tx queues */
	iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);
/*
 * Build the basic fields of the REPLY_TX command from mac80211's Tx info.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_info *info,
				   struct ieee80211_hdr *hdr,
				   u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_info *info,
				  __le16 fc, int is_hcca)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
				      RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
					     info->control.sta);
	/* For 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up RTS and CTS flags for certain packets */
	switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
	case cpu_to_le16(IEEE80211_STYPE_AUTH):
	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
	case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
	case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
		if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
			tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
		}
		break;
	default:
		break;
	}

	/* Set up antennas */
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case ALG_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyconf, skb_frag,
				       IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		if (keyconf->keylen == WEP_KEY_LEN_128)
			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
		break;
	}
}
/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	int swq_id, txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, len_org, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	/* drop all non-injected data frames if we are not associated */
	if (ieee80211_is_data(fc) &&
	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	if (info->flags & IEEE80211_TX_CTL_INJECTED)
		sta_id = priv->hw_params.bcast_sta_id;
	else
		sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop_unlock;
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
	    sta_priv->asleep) {
		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	txq_id = skb_get_queue_mapping(skb);
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (unlikely(tid >= MAX_TID_COUNT))
			goto drop_unlock;
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		}
	}

	txq = &priv->txq[txq_id];
	swq_id = txq->swq_id;
	q = &txq->q;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		goto drop_unlock;

	if (ieee80211_is_data_qos(fc))
		priv->stations[sta_id].tid[tid].tfds_in_queue++;

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
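	/*
	 * Editorial sketch of the encoding (not from the original source):
	 * QUEUE_TO_SEQ() is assumed to place the queue id in bits 8..12 and
	 * INDEX_TO_SEQ() the TFD index in bits 0..7, so queue 4 with
	 * write_ptr 7 yields sequence 0x0407; SEQ_TO_QUEUE()/SEQ_TO_INDEX()
	 * in iwl_tx_cmd_complete() invert it when the Tx response comes back.
	 */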
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	firstlen = len = (len + 3) & ~3;

	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, len,
				    PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
	pci_unmap_len_set(out_meta, len, len);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, 0);
	}

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    len, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						     le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &out_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/* avoid atomic ops if it isn't an associated client */
	if (sta_priv && sta_priv->client)
		atomic_inc(&sta_priv->pending_frames);

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq->swq_id);
		}
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries.
	 * Also check that the command buffer does not exceed the size
	 * of device_cmd and max_cmd_size. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space in command queue\n");
		if (iwl_within_ct_kill_margin(priv))
			iwl_tt_enter_ct_kill(priv);
		else {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			queue_work(priv->workqueue, &priv->restart);
		}
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				  "%d bytes at %d[%d]:%d\n",
				  get_cmd_string(out_cmd->hdr.cmd),
				  out_cmd->hdr.cmd,
				  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				  q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			     "%d bytes at %d[%d]:%d\n",
			     get_cmd_string(out_cmd->hdr.cmd),
			     out_cmd->hdr.cmd,
			     le16_to_cpu(out_cmd->hdr.sequence), fix_size,
			     q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	pci_unmap_addr_set(out_meta, mapping, phys_addr);
	pci_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
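
/**
 * iwl_tx_status - Hand a completed frame back to mac80211
 *
 * For an associated client this also drops the pending-frames count and,
 * once it reaches zero, lets mac80211 unblock a power-saving station.
 */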
static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}

	ieee80211_tx_status_irqsafe(priv->hw, skb);
}
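
/**
 * iwl_tx_queue_reclaim - Reclaim already-Tx'd entries up to and including @index
 *
 * Advances read_ptr, handing each skb to mac80211 and freeing its TFD.
 * Returns the number of reclaimed QoS-data frames so the caller can credit
 * them back via iwl_free_tfds_in_queue().
 */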
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		iwl_tx_status(priv, tx_info->skb[0]);

		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;
		tx_info->skb[0] = NULL;

		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		 txq_id, sequence,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];

	pci_unmap_single(priv->pci_dev,
			 pci_unmap_addr(meta, mapping),
			 pci_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}
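
/**
 * iwl_tx_agg_start - Begin a Tx aggregation session for <ra, tid>
 *
 * Grabs a free aggregation queue, points the station's TID data at it and
 * asks the uCode to enable aggregation. If frames for this TID are still
 * queued, the state parks in IWL_EMPTYING_HW_QUEUE_ADDBA until they drain
 * (see iwl_txq_check_empty()).
 */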
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
		 __func__, ra, tid);

	sta_id = iwl_find_station(priv, ra);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
						  sta_id, tid, *ssn);
	if (ret)
		return ret;

	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);
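
/**
 * iwl_tx_agg_stop - Tear down the Tx aggregation session for <ra, tid>
 *
 * If the aggregation queue is still non-empty, the state moves to
 * IWL_EMPTYING_HW_QUEUE_DELBA and the actual uCode queue disable is
 * deferred to iwl_txq_check_empty(); otherwise it happens immediately.
 */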
int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	if (!ra) {
		IWL_ERR(priv, "ra = NULL\n");
		return -EINVAL;
	}

	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
		tx_fifo_id = default_tid_to_tx_fifo[tid];
	else
		return -EINVAL;

	sta_id = iwl_find_station(priv, ra);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	if (priv->stations[sta_id].tid[tid].agg.state ==
	    IWL_EMPTYING_HW_QUEUE_ADDBA) {
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
		return 0;
	}

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;
	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	spin_lock_irqsave(&priv->lock, flags);
	/*
	 * The only reason this call can fail is a queue number out of range,
	 * which can happen if the uCode is reloaded and all the station
	 * information is lost. If it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up its own data.
	 */
	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
					     tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);

	return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);
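
/**
 * iwl_txq_check_empty - Complete a deferred ADDBA/DELBA transition
 *
 * Called as frames are reclaimed; once the aggregation queue has drained,
 * finishes whichever flow (session start or stop) was waiting on it.
 */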
int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = default_tid_to_tx_fifo[tid];
			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);
/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
					     struct iwl_ht_agg *agg,
					     struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	u64 bitmap;
	int successes = 0;
	struct ieee80211_tx_info *info;

	if (unlikely(!agg->wait_for_ba)) {
		IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	bitmap &= agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	for (i = 0; i < agg->frame_count ; i++) {
		ack = bitmap & (1ULL << i);
		successes += !!ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
			agg->start_idx + i);
	}

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

	return 0;
}
/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;

	/* Find index just before block-ack window */
	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_wake_queue(priv, txq->swq_id);

		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
	}
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */