iwl-tx.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

static const u16 default_tid_to_tx_fifo[] = {
        IWL_TX_FIFO_AC1,
        IWL_TX_FIFO_AC0,
        IWL_TX_FIFO_AC0,
        IWL_TX_FIFO_AC1,
        IWL_TX_FIFO_AC2,
        IWL_TX_FIFO_AC2,
        IWL_TX_FIFO_AC3,
        IWL_TX_FIFO_AC3,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_AC3
};

static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
                                    struct iwl_dma_ptr *ptr, size_t size)
{
        ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
        if (!ptr->addr)
                return -ENOMEM;
        ptr->size = size;
        return 0;
}

static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
                                    struct iwl_dma_ptr *ptr)
{
        if (unlikely(!ptr->addr))
                return;

        pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
        memset(ptr, 0, sizeof(*ptr));
}
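
/*
 * Usage sketch (illustrative only, not a call site in this file): the two
 * helpers above pair up around any coherent-DMA allocation. "example_tbl"
 * and the 1024-byte size are hypothetical names for illustration.
 *
 *      if (iwl_alloc_dma_ptr(priv, &priv->example_tbl, 1024))
 *              return -ENOMEM;
 *      ...
 *      iwl_free_dma_ptr(priv, &priv->example_tbl);
 *
 * iwl_free_dma_ptr() zeroes the descriptor after freeing, so a second call
 * on the same iwl_dma_ptr is a harmless no-op.
 */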
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        u32 reg = 0;
        int ret = 0;
        int txq_id = txq->q.id;

        if (txq->need_update == 0)
                return ret;

        /* if we're trying to save power */
        if (test_bit(STATUS_POWER_PMI, &priv->status)) {
                /* wake up nic if it's powered down ...
                 * uCode will wake up, and interrupt us again, so next
                 * time we'll skip this part. */
                reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                        IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
                        iwl_set_bit(priv, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                        return ret;
                }

                iwl_write_direct32(priv, HBUS_TARG_WRPTR,
                                   txq->q.write_ptr | (txq_id << 8));

        /* else not in power-save mode, uCode will never sleep when we're
         * trying to tx (during RFKILL, we're not trying to tx). */
        } else
                iwl_write32(priv, HBUS_TARG_WRPTR,
                            txq->q.write_ptr | (txq_id << 8));

        txq->need_update = 0;

        return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @priv: driver private data
 * @txq_id: index of the transmit queue to deallocate
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct pci_dev *dev = priv->pci_dev;
        int i;

        if (q->n_bd == 0)
                return;

        /* first, empty all BD's */
        for (; q->write_ptr != q->read_ptr;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
                priv->cfg->ops->lib->txq_free_tfd(priv, txq);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                pci_free_consistent(dev, priv->hw_params.tfd_size *
                                    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);
/**
 * iwl_cmd_queue_free - Deallocate the command queue.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
        struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
        struct iwl_queue *q = &txq->q;
        struct pci_dev *dev = priv->pci_dev;
        int i;

        if (q->n_bd == 0)
                return;

        /* De-alloc array of command/tx buffers, including the extra
         * oversized scan-command slot at index TFD_CMD_SLOTS */
        for (i = 0; i <= TFD_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                pci_free_consistent(dev, priv->hw_params.tfd_size *
                                    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on 'tx done IRQ'), if free space becomes > high
 * mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
        int s = q->read_ptr - q->write_ptr;

        if (q->read_ptr > q->write_ptr)
                s -= q->n_bd;

        if (s <= 0)
                s += q->n_window;
        /* keep some reserve to not confuse empty and full situations */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
EXPORT_SYMBOL(iwl_queue_space);
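
/*
 * Worked example (illustrative, assuming n_bd == n_window == 256 as for a
 * data queue here): with read_ptr = 10 and write_ptr = 250,
 *
 *      s = 10 - 250 = -240     read_ptr < write_ptr, so no n_bd correction
 *      s += 256  ->  16        16 entries are genuinely unused
 *      s -= 2    ->  14        reported space, after the 2-entry reserve
 *
 * The reserve keeps write_ptr from ever catching up to read_ptr, so the
 * empty state (read_ptr == write_ptr) stays unambiguous.
 */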
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
                          int count, int slots_num, u32 id)
{
        q->n_bd = count;
        q->n_window = slots_num;
        q->id = id;

        /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
         * and iwl_queue_dec_wrap are broken. */
        BUG_ON(!is_power_of_2(count));

        /* slots_num must be power-of-two size, otherwise
         * get_cmd_index is broken. */
        BUG_ON(!is_power_of_2(slots_num));

        q->low_mark = q->n_window / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;

        q->high_mark = q->n_window / 8;
        if (q->high_mark < 2)
                q->high_mark = 2;

        q->write_ptr = q->read_ptr = 0;

        return 0;
}
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
                              struct iwl_tx_queue *txq, u32 id)
{
        struct pci_dev *dev = priv->pci_dev;
        size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (id != IWL_CMD_QUEUE_NUM) {
                txq->txb = kmalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERR(priv, "kmalloc for auxiliary BD "
                                "structures failed\n");
                        goto error;
                }
        } else {
                txq->txb = NULL;
        }

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
        if (!txq->tfds) {
                IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
                goto error;
        }
        txq->q.id = id;

        return 0;

error:
        kfree(txq->txb);
        txq->txb = NULL;

        return -ENOMEM;
}
/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                      int slots_num, u32 txq_id)
{
        int i, len;
        int ret;
        int actual_slots = slots_num;

        /*
         * Alloc buffer array for commands (Tx or other types of commands).
         * For the command queue (#4), allocate command space + one big
         * command for scan, since the scan command is very large; the system
         * will not have two scans at the same time, so only one is needed.
         * For normal Tx queues (all other queues), no super-size command
         * space is needed.
         */
        if (txq_id == IWL_CMD_QUEUE_NUM)
                actual_slots++;

        txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
                            GFP_KERNEL);
        txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
                           GFP_KERNEL);

        if (!txq->meta || !txq->cmd)
                goto out_free_arrays;

        len = sizeof(struct iwl_device_cmd);
        for (i = 0; i < actual_slots; i++) {
                /* only happens for cmd queue */
                if (i == slots_num)
                        len += IWL_MAX_SCAN_SIZE;

                txq->cmd[i] = kmalloc(len, GFP_KERNEL);
                if (!txq->cmd[i])
                        goto err;
        }

        /* Alloc driver data array and TFD circular buffer */
        ret = iwl_tx_queue_alloc(priv, txq, txq_id);
        if (ret)
                goto err;

        txq->need_update = 0;

        /* aggregation TX queues will get their ID when aggregation begins */
        if (txq_id <= IWL_TX_FIFO_AC3)
                txq->swq_id = txq_id;

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

        /* Tell device where to find queue */
        priv->cfg->ops->lib->txq_init(priv, txq);

        return 0;
err:
        for (i = 0; i < actual_slots; i++)
                kfree(txq->cmd[i]);
out_free_arrays:
        kfree(txq->meta);
        kfree(txq->cmd);

        return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);
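
/*
 * Illustrative layout (assuming TFD_CMD_SLOTS == 32, as in this generation
 * of the driver): for the command queue, actual_slots becomes 33 and
 * txq->cmd[32] is the single oversized buffer, sized
 * sizeof(struct iwl_device_cmd) + IWL_MAX_SCAN_SIZE, reserved for the scan
 * command; get_cmd_index() steers CMD_SIZE_HUGE commands to that slot.
 * Data queues allocate exactly slots_num normal-size buffers.
 */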
/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
        int txq_id;

        /* Tx queues */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
                if (txq_id == IWL_CMD_QUEUE_NUM)
                        iwl_cmd_queue_free(priv);
                else
                        iwl_tx_queue_free(priv, txq_id);

        iwl_free_dma_ptr(priv, &priv->kw);

        iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
        int ret = 0;
        int txq_id, slots_num;
        unsigned long flags;

        /* Free all tx/cmd queues and keep-warm buffer */
        iwl_hw_txq_ctx_free(priv);

        ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
                                priv->hw_params.scd_bc_tbls_size);
        if (ret) {
                IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
                goto error_bc_tbls;
        }
        /* Alloc keep-warm buffer */
        ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
        if (ret) {
                IWL_ERR(priv, "Keep Warm allocation failed\n");
                goto error_kw;
        }
        spin_lock_irqsave(&priv->lock, flags);

        /* Turn off all Tx DMA fifos */
        priv->cfg->ops->lib->txq_set_sched(priv, 0);

        /* Tell NIC where to find the "keep warm" buffer */
        iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

        spin_unlock_irqrestore(&priv->lock, flags);

        /* Alloc and init all Tx queues, including the command queue (#4) */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
                slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
                                        TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
                                        txq_id);
                if (ret) {
                        IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
                        goto error;
                }
        }

        return ret;

error:
        iwl_hw_txq_ctx_free(priv);
        iwl_free_dma_ptr(priv, &priv->kw);
error_kw:
        iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
error_bc_tbls:
        return ret;
}
/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
        int ch;
        unsigned long flags;

        /* Turn off all Tx DMA fifos */
        spin_lock_irqsave(&priv->lock, flags);

        priv->cfg->ops->lib->txq_set_sched(priv, 0);

        /* Stop each Tx DMA channel, and wait for it to be idle */
        for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
                iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
                iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
                                    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
                                    1000);
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        /* Deallocate memory for all Tx queues */
        iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);
/*
 * Build the basic part of the REPLY_TX host command.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
                                   struct iwl_tx_cmd *tx_cmd,
                                   struct ieee80211_tx_info *info,
                                   struct ieee80211_hdr *hdr,
                                   u8 sta_id)
{
        __le16 fc = hdr->frame_control;
        __le32 tx_flags = tx_cmd->tx_flags;

        tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
                tx_flags |= TX_CMD_FLG_ACK_MSK;
                if (ieee80211_is_mgmt(fc))
                        tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
                if (ieee80211_is_probe_resp(fc) &&
                    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
                        tx_flags |= TX_CMD_FLG_TSF_MSK;
        } else {
                tx_flags &= (~TX_CMD_FLG_ACK_MSK);
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }

        if (ieee80211_is_back_req(fc))
                tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

        tx_cmd->sta_id = sta_id;
        if (ieee80211_has_morefrags(fc))
                tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

        if (ieee80211_is_data_qos(fc)) {
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        } else {
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }

        priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

        if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
                tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

        tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
        if (ieee80211_is_mgmt(fc)) {
                if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
                else
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
        } else {
                tx_cmd->timeout.pm_frame_timeout = 0;
        }

        tx_cmd->driver_txop = 0;
        tx_cmd->tx_flags = tx_flags;
        tx_cmd->next_frame_len = 0;
}
#define RTS_HCCA_RETRY_LIMIT            3
#define RTS_DEFAULT_RETRY_LIMIT         60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
                                  struct iwl_tx_cmd *tx_cmd,
                                  struct ieee80211_tx_info *info,
                                  __le16 fc, int sta_id,
                                  int is_hcca)
{
        u32 rate_flags = 0;
        int rate_idx;
        u8 rts_retry_limit = 0;
        u8 data_retry_limit = 0;
        u8 rate_plcp;

        rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
                       IWL_RATE_COUNT - 1);

        rate_plcp = iwl_rates[rate_idx].plcp;

        rts_retry_limit = (is_hcca) ?
            RTS_HCCA_RETRY_LIMIT : RTS_DEFAULT_RETRY_LIMIT;

        if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
                rate_flags |= RATE_MCS_CCK_MSK;

        if (ieee80211_is_probe_resp(fc)) {
                data_retry_limit = 3;
                if (data_retry_limit < rts_retry_limit)
                        rts_retry_limit = data_retry_limit;
        } else
                data_retry_limit = IWL_DEFAULT_TX_RETRY;

        if (priv->data_retry_limit != -1)
                data_retry_limit = priv->data_retry_limit;

        if (ieee80211_is_data(fc)) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
        } else {
                switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
                case cpu_to_le16(IEEE80211_STYPE_AUTH):
                case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
                case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
                case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
                        if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
                                tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
                                tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
                        }
                        break;
                default:
                        break;
                }

                priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
                rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
        }

        tx_cmd->rts_retry_limit = rts_retry_limit;
        tx_cmd->data_retry_limit = data_retry_limit;
        tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
                                      struct ieee80211_tx_info *info,
                                      struct iwl_tx_cmd *tx_cmd,
                                      struct sk_buff *skb_frag,
                                      int sta_id)
{
        struct ieee80211_key_conf *keyconf = info->control.hw_key;

        switch (keyconf->alg) {
        case ALG_CCMP:
                tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
                memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
                if (info->flags & IEEE80211_TX_CTL_AMPDU)
                        tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
                IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
                break;

        case ALG_TKIP:
                tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
                ieee80211_get_tkip_key(keyconf, skb_frag,
                        IEEE80211_TKIP_P2_KEY, tx_cmd->key);
                IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
                break;

        case ALG_WEP:
                tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
                        (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

                if (keyconf->keylen == WEP_KEY_LEN_128)
                        tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

                memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

                IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
                             "with key %d\n", keyconf->keyidx);
                break;

        default:
                IWL_ERR(priv, "Unknown encryption alg %d\n", keyconf->alg);
                break;
        }
}
static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
{
        /* 0 - mgmt, 1 - ctl, 2 - data */
        int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
        priv->tx_stats[idx].cnt++;
        priv->tx_stats[idx].bytes += len;
}
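
/*
 * Worked example: IEEE80211_FCTL_FTYPE covers bits 2-3 of the frame control
 * field, so (fc & IEEE80211_FCTL_FTYPE) >> 2 maps management (0x00) to 0,
 * control (0x04) to 1 and data (0x08) to 2, matching the three tx_stats
 * buckets named in the comment above.
 */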
/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_tx_queue *txq;
        struct iwl_queue *q;
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        struct iwl_tx_cmd *tx_cmd;
        int swq_id, txq_id;
        dma_addr_t phys_addr;
        dma_addr_t txcmd_phys;
        dma_addr_t scratch_phys;
        u16 len, len_org;
        u16 seq_number = 0;
        __le16 fc;
        u8 hdr_len;
        u8 sta_id;
        u8 wait_write_ptr = 0;
        u8 tid = 0;
        u8 *qc = NULL;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->lock, flags);
        if (iwl_is_rfkill(priv)) {
                IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
                goto drop_unlock;
        }

        if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
             IWL_INVALID_RATE) {
                IWL_ERR(priv, "ERROR: No TX rate available.\n");
                goto drop_unlock;
        }

        fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
        if (ieee80211_is_auth(fc))
                IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
        else if (ieee80211_is_assoc_req(fc))
                IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
        else if (ieee80211_is_reassoc_req(fc))
                IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

        /* drop all data frames if we are not associated */
        if (ieee80211_is_data(fc) &&
            (!iwl_is_monitor_mode(priv) ||
            !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
            (!iwl_is_associated(priv) ||
             ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
             !priv->assoc_station_added)) {
                IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
                goto drop_unlock;
        }

        hdr_len = ieee80211_hdrlen(fc);

        /* Find (or create) index into station table for destination station */
        sta_id = iwl_get_sta_id(priv, hdr);
        if (sta_id == IWL_INVALID_STATION) {
                IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
                               hdr->addr1);
                goto drop_unlock;
        }

        IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

        txq_id = skb_get_queue_mapping(skb);
        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);
                tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
                seq_number = priv->stations[sta_id].tid[tid].seq_number;
                seq_number &= IEEE80211_SCTL_SEQ;
                hdr->seq_ctrl = hdr->seq_ctrl &
                                cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(seq_number);
                seq_number += 0x10;
                /* aggregation is on for this <sta,tid> */
                if (info->flags & IEEE80211_TX_CTL_AMPDU)
                        txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
        }

        txq = &priv->txq[txq_id];
        swq_id = txq->swq_id;
        q = &txq->q;

        if (unlikely(iwl_queue_space(q) < q->high_mark))
                goto drop_unlock;

        if (ieee80211_is_data_qos(fc))
                priv->stations[sta_id].tid[tid].tfds_in_queue++;

        /* Set up driver data for this TFD */
        memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
        txq->txb[q->write_ptr].skb[0] = skb;

        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_cmd = txq->cmd[q->write_ptr];
        out_meta = &txq->meta[q->write_ptr];
        tx_cmd = &out_cmd->cmd.tx;
        memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
        memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

        /*
         * Set up the Tx-command (not MAC!) header.
         * Store the chosen Tx queue and TFD index within the sequence field;
         * after Tx, uCode's Tx response will return this value so driver can
         * locate the frame within the tx queue and do post-tx processing.
         */
        out_cmd->hdr.cmd = REPLY_TX;
        out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                                INDEX_TO_SEQ(q->write_ptr)));

        /* Copy MAC header from skb into command buffer */
        memcpy(tx_cmd->hdr, hdr, hdr_len);

        /* Total # bytes to be transmitted */
        len = (u16)skb->len;
        tx_cmd->len = cpu_to_le16(len);

        if (info->control.hw_key)
                iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

        /* TODO need this for burst mode later on */
        iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
        iwl_dbg_log_tx_data_frame(priv, len, hdr);

        /* set is_hcca to 0; it probably will never be implemented */
        iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

        iwl_update_tx_stats(priv, le16_to_cpu(fc), len);

        /*
         * Use the first empty entry in this queue's command buffer array
         * to contain the Tx command and MAC header concatenated together
         * (payload data will be in another buffer).
         * Size of this varies, due to varying MAC header length.
         * If end is not dword aligned, we'll have 2 extra bytes at the end
         * of the MAC header (device reads on dword boundaries).
         * We'll tell device about this padding later.
         */
        len = sizeof(struct iwl_tx_cmd) +
                sizeof(struct iwl_cmd_header) + hdr_len;

        len_org = len;
        len = (len + 3) & ~3;

        if (len_org != len)
                len_org = 1;
        else
                len_org = 0;

        /* Tell NIC about any 2-byte padding after MAC header */
        if (len_org)
                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
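
        /*
         * Worked example (sizes illustrative): if the command header, Tx
         * command and a QoS MAC header add up to len_org = 222 bytes, then
         * (222 + 3) & ~3 rounds len up to 224. len_org collapses to the
         * flag value 1, and MH_PAD tells the device to skip the 2 pad
         * bytes that now trail the MAC header in the DMA buffer.
         */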
        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
        txcmd_phys = pci_map_single(priv->pci_dev,
                                    &out_cmd->hdr, len,
                                    PCI_DMA_BIDIRECTIONAL);
        pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
        pci_unmap_len_set(out_meta, len, len);
        /* Add buffer containing Tx command and MAC(!) header to TFD's
         * first entry */
        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                   txcmd_phys, len, 1, 0);

        if (!ieee80211_has_morefrags(hdr->frame_control)) {
                txq->need_update = 1;
                if (qc)
                        priv->stations[sta_id].tid[tid].seq_number = seq_number;
        } else {
                wait_write_ptr = 1;
                txq->need_update = 0;
        }

        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
        len = skb->len - hdr_len;
        if (len) {
                phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
                                           len, PCI_DMA_TODEVICE);
                priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                           phys_addr, len,
                                                           0, 0);
        }

        scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
                                offsetof(struct iwl_tx_cmd, scratch);

        len = sizeof(struct iwl_tx_cmd) +
                sizeof(struct iwl_cmd_header) + hdr_len;
        /* take back ownership of DMA buffer to enable update */
        pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
                                    len, PCI_DMA_BIDIRECTIONAL);
        tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
        tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

        IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
                     le16_to_cpu(out_cmd->hdr.sequence));
        IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

        /* Set up entry for this TFD in Tx byte-count array */
        if (info->flags & IEEE80211_TX_CTL_AMPDU)
                priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
                                                     le16_to_cpu(tx_cmd->len));

        pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
                                       len, PCI_DMA_BIDIRECTIONAL);

        /* Tell device the write index *just past* this latest filled TFD */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        ret = iwl_txq_update_write_ptr(priv, txq);
        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret)
                return ret;

        if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
                if (wait_write_ptr) {
                        spin_lock_irqsave(&priv->lock, flags);
                        txq->need_update = 1;
                        iwl_txq_update_write_ptr(priv, txq);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else {
                        iwl_stop_queue(priv, txq->swq_id);
                }
        }

        return 0;

drop_unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
        return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * Returns a negative value on failure. On success, returns the index
 * of the command in the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
        struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        dma_addr_t phys_addr;
        unsigned long flags;
        int ret;
        u32 idx;
        u16 fix_size;

        cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
        fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

        /* If any of the command structures end up being larger than
         * the TFD_MAX_PAYLOAD_SIZE, and is sent as a 'small' command then
         * we will need to increase the size of the TFD entries */
        BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
               !(cmd->flags & CMD_SIZE_HUGE));

        if (iwl_is_rfkill(priv)) {
                IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
                return -EIO;
        }

        if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
                IWL_ERR(priv, "No space for Tx\n");
                return -ENOSPC;
        }

        spin_lock_irqsave(&priv->hcmd_lock, flags);

        idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
        out_cmd = txq->cmd[idx];
        out_meta = &txq->meta[idx];

        memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
        out_meta->flags = cmd->flags;
        if (cmd->flags & CMD_WANT_SKB)
                out_meta->source = cmd;
        if (cmd->flags & CMD_ASYNC)
                out_meta->callback = cmd->callback;

        out_cmd->hdr.cmd = cmd->id;
        memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

        /* At this point, the out_cmd now has all of the incoming cmd
         * information */

        out_cmd->hdr.flags = 0;
        out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
                        INDEX_TO_SEQ(q->write_ptr));
        if (cmd->flags & CMD_SIZE_HUGE)
                out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;

#ifdef CONFIG_IWLWIFI_DEBUG
        switch (out_cmd->hdr.cmd) {
        case REPLY_TX_LINK_QUALITY_CMD:
        case SENSITIVITY_CMD:
                IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
                                  "%d bytes at %d[%d]:%d\n",
                                  get_cmd_string(out_cmd->hdr.cmd),
                                  out_cmd->hdr.cmd,
                                  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
                                  q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
                break;
        default:
                IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
                             "%d bytes at %d[%d]:%d\n",
                             get_cmd_string(out_cmd->hdr.cmd),
                             out_cmd->hdr.cmd,
                             le16_to_cpu(out_cmd->hdr.sequence), fix_size,
                             q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
        }
#endif
        txq->need_update = 1;

        if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
                /* Set up entry in queue's byte count circular buffer */
                priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

        phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
                                   fix_size, PCI_DMA_BIDIRECTIONAL);
        pci_unmap_addr_set(out_meta, mapping, phys_addr);
        pci_unmap_len_set(out_meta, len, fix_size);

        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                   phys_addr, fix_size, 1,
                                                   U32_PAD(cmd->len));

        /* Increment and update queue's write index */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        ret = iwl_txq_update_write_ptr(priv, txq);

        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
        return ret ? ret : idx;
}
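
/*
 * Illustrative round trip of the sequence field (assuming the usual iwlwifi
 * encoding: queue number in bits 8-12, TFD index in bits 0-7):
 *
 *      u16 seq = QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | INDEX_TO_SEQ(7);
 *      SEQ_TO_QUEUE(seq);      returns IWL_CMD_QUEUE_NUM
 *      SEQ_TO_INDEX(seq);      returns 7
 *
 * iwl_tx_cmd_complete() below depends on this round trip to route a uCode
 * response back to the exact command-queue slot that issued it.
 */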
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_tx_info *tx_info;
        int nfreed = 0;

        if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
                        "is out of range [0-%d] %d %d.\n", txq_id,
                        index, q->n_bd, q->write_ptr, q->read_ptr);
                return 0;
        }

        for (index = iwl_queue_inc_wrap(index, q->n_bd);
             q->read_ptr != index;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                tx_info = &txq->txb[txq->q.read_ptr];
                ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
                tx_info->skb[0] = NULL;

                if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
                        priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

                priv->cfg->ops->lib->txq_free_tfd(priv, txq);
                nfreed++;
        }
        return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
                                   int idx, int cmd_idx)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        int nfreed = 0;

        if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
                        "is out of range [0-%d] %d %d.\n", txq_id,
                        idx, q->n_bd, q->write_ptr, q->read_ptr);
                return;
        }

        pci_unmap_single(priv->pci_dev,
                         pci_unmap_addr(&txq->meta[cmd_idx], mapping),
                         pci_unmap_len(&txq->meta[cmd_idx], len),
                         PCI_DMA_BIDIRECTIONAL);

        for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                if (nfreed++ > 0) {
                        IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
                                q->write_ptr, q->read_ptr);
                        queue_work(priv->workqueue, &priv->restart);
                }

        }
}
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int index = SEQ_TO_INDEX(sequence);
        int cmd_index;
        bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
        struct iwl_device_cmd *cmd;
        struct iwl_cmd_meta *meta;

        /* If a Tx command is being handled and it isn't in the actual
         * command queue then a command routing bug has been introduced
         * in the queue management code. */
        if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
                 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
                 txq_id, sequence,
                 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
                 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
                iwl_print_hex_error(priv, pkt, 32);
                return;
        }

        cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
        cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
        meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];

        /* Input error checking is done when commands are added to queue. */
        if (meta->flags & CMD_WANT_SKB) {
                meta->source->reply_skb = rxb->skb;
                rxb->skb = NULL;
        } else if (meta->callback)
                meta->callback(priv, cmd, rxb->skb);

        iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

        if (!(meta->flags & CMD_ASYNC)) {
                clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
                wake_up_interruptible(&priv->wait_command_queue);
        }
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
        int txq_id;

        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
                if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
                        return txq_id;
        return -1;
}
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
        int sta_id;
        int tx_fifo;
        int txq_id;
        int ret;
        unsigned long flags;
        struct iwl_tid_data *tid_data;

        if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
                tx_fifo = default_tid_to_tx_fifo[tid];
        else
                return -EINVAL;

        IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
                 __func__, ra, tid);

        sta_id = iwl_find_station(priv, ra);
        if (sta_id == IWL_INVALID_STATION) {
                IWL_ERR(priv, "Start AGG on invalid station\n");
                return -ENXIO;
        }
        if (unlikely(tid >= MAX_TID_COUNT))
                return -EINVAL;

        if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
                IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF!\n");
                return -ENXIO;
        }

        txq_id = iwl_txq_ctx_activate_free(priv);
        if (txq_id == -1) {
                IWL_ERR(priv, "No free aggregation queue available\n");
                return -ENXIO;
        }

        spin_lock_irqsave(&priv->sta_lock, flags);
        tid_data = &priv->stations[sta_id].tid[tid];
        *ssn = SEQ_TO_SN(tid_data->seq_number);
        tid_data->agg.txq_id = txq_id;
        priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
        spin_unlock_irqrestore(&priv->sta_lock, flags);

        ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
                                                  sta_id, tid, *ssn);
        if (ret)
                return ret;

        if (tid_data->tfds_in_queue == 0) {
                IWL_DEBUG_HT(priv, "HW queue is empty\n");
                tid_data->agg.state = IWL_AGG_ON;
                ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
        } else {
                IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
                             tid_data->tfds_in_queue);
                tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
        }
        return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);
int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
        int tx_fifo_id, txq_id, sta_id, ssn = -1;
        struct iwl_tid_data *tid_data;
        int ret, write_ptr, read_ptr;
        unsigned long flags;

        if (!ra) {
                IWL_ERR(priv, "ra = NULL\n");
                return -EINVAL;
        }

        if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
                tx_fifo_id = default_tid_to_tx_fifo[tid];
        else
                return -EINVAL;

        sta_id = iwl_find_station(priv, ra);

        if (sta_id == IWL_INVALID_STATION) {
                IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
                return -ENXIO;
        }

        if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
                IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");

        tid_data = &priv->stations[sta_id].tid[tid];
        ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
        txq_id = tid_data->agg.txq_id;
        write_ptr = priv->txq[txq_id].q.write_ptr;
        read_ptr = priv->txq[txq_id].q.read_ptr;

        /* The queue is not empty */
        if (write_ptr != read_ptr) {
                IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
                priv->stations[sta_id].tid[tid].agg.state =
                                IWL_EMPTYING_HW_QUEUE_DELBA;
                return 0;
        }

        IWL_DEBUG_HT(priv, "HW queue is empty\n");
        priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

        spin_lock_irqsave(&priv->lock, flags);
        ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
                                                   tx_fifo_id);
        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret)
                return ret;

        ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

        return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);
int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
        struct iwl_queue *q = &priv->txq[txq_id].q;
        u8 *addr = priv->stations[sta_id].sta.sta.addr;
        struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

        switch (priv->stations[sta_id].tid[tid].agg.state) {
        case IWL_EMPTYING_HW_QUEUE_DELBA:
                /* We are reclaiming the last packet of the */
                /* aggregated HW queue */
                if ((txq_id == tid_data->agg.txq_id) &&
                    (q->read_ptr == q->write_ptr)) {
                        u16 ssn = SEQ_TO_SN(tid_data->seq_number);
                        int tx_fifo = default_tid_to_tx_fifo[tid];
                        IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
                        priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
                                                             ssn, tx_fifo);
                        tid_data->agg.state = IWL_AGG_OFF;
                        ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
                }
                break;
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
                /* We are reclaiming the last packet of the queue */
                if (tid_data->tfds_in_queue == 0) {
                        IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
                        tid_data->agg.state = IWL_AGG_ON;
                        ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
                }
                break;
        }
        return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);
/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
                                 struct iwl_ht_agg *agg,
                                 struct iwl_compressed_ba_resp *ba_resp)
{
        int i, sh, ack;
        u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
        u64 bitmap;
        int successes = 0;
        struct ieee80211_tx_info *info;

        if (unlikely(!agg->wait_for_ba)) {
                IWL_ERR(priv, "Received BA when not expected\n");
                return -EINVAL;
        }

        /* Mark that the expected block-ack response arrived */
        agg->wait_for_ba = 0;
        IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

        /* Calculate shift to align block-ack bits with our Tx window bits */
        sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
        if (sh < 0) /* tbw something is wrong with indices */
                sh += 0x100;

        /* don't use 64-bit values for now */
        bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

        if (agg->frame_count > (64 - sh)) {
                IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
                return -1;
        }

        /* check for success or failure according to the
         * transmitted bitmap and block-ack bitmap */
        bitmap &= agg->bitmap;

        /* For each frame attempted in aggregation,
         * update driver's record of tx frame's status. */
        for (i = 0; i < agg->frame_count; i++) {
                ack = bitmap & (1ULL << i);
                successes += !!ack;
                IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
                        ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
                        agg->start_idx + i);
        }

        info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
        memset(&info->status, 0, sizeof(info->status));
        info->flags = IEEE80211_TX_STAT_ACK;
        info->flags |= IEEE80211_TX_STAT_AMPDU;
        info->status.ampdu_ack_map = successes;
        info->status.ampdu_ack_len = agg->frame_count;
        iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

        IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

        return 0;
}
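
/*
 * Worked example (illustrative): if the driver's Tx window starts at
 * agg->start_idx = 200 while the BA's seq_ctl maps to index 196, then
 * sh = 4 and the device's bitmap is shifted right by 4 so that bit 0
 * lines up with start_idx. After masking with agg->bitmap (the frames
 * actually transmitted), bit i set means frame (start_idx + i) was ACK'd.
 */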
/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
                                struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
        struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
        struct iwl_tx_queue *txq = NULL;
        struct iwl_ht_agg *agg;
        int index;
        int sta_id;
        int tid;

        /* "flow" corresponds to Tx queue */
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

        /* "ssn" is start of block-ack Tx window, corresponds to index
         * (in Tx queue's circular buffer) of first TFD/frame in window */
        u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

        if (scd_flow >= priv->hw_params.max_txq_num) {
                IWL_ERR(priv,
                        "BUG_ON scd_flow is bigger than number of queues\n");
                return;
        }

        txq = &priv->txq[scd_flow];
        sta_id = ba_resp->sta_id;
        tid = ba_resp->tid;
        agg = &priv->stations[sta_id].tid[tid].agg;

        /* Find index just before block-ack window */
        index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

        /* TODO: Need to get this copy more safely - now good for debug */

        IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
                           "sta_id = %d\n",
                           agg->wait_for_ba,
                           (u8 *) &ba_resp->sta_addr_lo32,
                           ba_resp->sta_id);
        IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
                           "%d, scd_ssn = %d\n",
                           ba_resp->tid,
                           ba_resp->seq_ctl,
                           (unsigned long long)le64_to_cpu(ba_resp->bitmap),
                           ba_resp->scd_flow,
                           ba_resp->scd_ssn);
        IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
                           agg->start_idx,
                           (unsigned long long)agg->bitmap);

        /* Update driver's record of ACK vs. not for each frame in window */
        iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

        /* Release all TFDs before the SSN, i.e. all TFDs in front of
         * block-ack window (we assume that they've been successfully
         * transmitted ... if not, it's too late anyway). */
        if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
                /* calculate mac80211 ampdu sw queue to wake */
                int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
                priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

                if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
                    priv->mac80211_registered &&
                    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
                        iwl_wake_queue(priv, txq->swq_id);

                iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
        }
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
        switch (status & TX_STATUS_MSK) {
        case TX_STATUS_SUCCESS:
                return "SUCCESS";
                TX_STATUS_ENTRY(SHORT_LIMIT);
                TX_STATUS_ENTRY(LONG_LIMIT);
                TX_STATUS_ENTRY(FIFO_UNDERRUN);
                TX_STATUS_ENTRY(MGMNT_ABORT);
                TX_STATUS_ENTRY(NEXT_FRAG);
                TX_STATUS_ENTRY(LIFE_EXPIRE);
                TX_STATUS_ENTRY(DEST_PS);
                TX_STATUS_ENTRY(ABORTED);
                TX_STATUS_ENTRY(BT_RETRY);
                TX_STATUS_ENTRY(STA_INVALID);
                TX_STATUS_ENTRY(FRAG_DROPPED);
                TX_STATUS_ENTRY(TID_DISABLE);
                TX_STATUS_ENTRY(FRAME_FLUSHED);
                TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
                TX_STATUS_ENTRY(TX_LOCKED);
                TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
        }

        return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */