iwl-tx.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
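
/*
 * Editorial note on the table below: TIDs 0-7 follow the usual 802.11e
 * user-priority to access-category mapping (FIFO AC0 carries the
 * background TIDs 1 and 2, AC1 the best-effort TIDs 0 and 3, AC2 the
 * video TIDs 4 and 5, AC3 the voice TIDs 6 and 7). TIDs 8-15 get no
 * FIFO of their own, and the final entry appears to cover non-QoS
 * frames.
 */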
static const u16 default_tid_to_tx_fifo[] = {
        IWL_TX_FIFO_AC1,
        IWL_TX_FIFO_AC0,
        IWL_TX_FIFO_AC0,
        IWL_TX_FIFO_AC1,
        IWL_TX_FIFO_AC2,
        IWL_TX_FIFO_AC2,
        IWL_TX_FIFO_AC3,
        IWL_TX_FIFO_AC3,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_NONE,
        IWL_TX_FIFO_AC3
};
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
                                    struct iwl_dma_ptr *ptr, size_t size)
{
        ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
        if (!ptr->addr)
                return -ENOMEM;
        ptr->size = size;
        return 0;
}

static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
                                    struct iwl_dma_ptr *ptr)
{
        if (unlikely(!ptr->addr))
                return;

        pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
        memset(ptr, 0, sizeof(*ptr));
}
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        u32 reg = 0;
        int ret = 0;
        int txq_id = txq->q.id;

        if (txq->need_update == 0)
                return ret;

        /* if we're trying to save power */
        if (test_bit(STATUS_POWER_PMI, &priv->status)) {
                /* wake up nic if it's powered down ...
                 * uCode will wake up, and interrupt us again, so next
                 * time we'll skip this part. */
                reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                        IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
                        iwl_set_bit(priv, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                        return ret;
                }

                iwl_write_direct32(priv, HBUS_TARG_WRPTR,
                                   txq->q.write_ptr | (txq_id << 8));

        /* else not in power-save mode, uCode will never sleep when we're
         * trying to tx (during RFKILL, we're not trying to tx). */
        } else
                iwl_write32(priv, HBUS_TARG_WRPTR,
                            txq->q.write_ptr | (txq_id << 8));

        txq->need_update = 0;

        return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
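
/*
 * Note on the register format above: HBUS_TARG_WRPTR takes the TFD
 * index in its low byte and the Tx queue id shifted to bits 8 and up,
 * which is why both writes use "write_ptr | (txq_id << 8)".
 */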
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct pci_dev *dev = priv->pci_dev;
        int i, len;

        if (q->n_bd == 0)
                return;

        /* first, empty all BD's */
        for (; q->write_ptr != q->read_ptr;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
                priv->cfg->ops->lib->txq_free_tfd(priv, txq);

        len = sizeof(struct iwl_cmd) * q->n_window;

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                pci_free_consistent(dev, priv->hw_params.tfd_size *
                                    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);
/**
 * iwl_cmd_queue_free - Deallocate the command queue's DMA buffers.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
        struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
        struct iwl_queue *q = &txq->q;
        struct pci_dev *dev = priv->pci_dev;
        int i, len;

        if (q->n_bd == 0)
                return;

        len = sizeof(struct iwl_cmd) * q->n_window;
        len += IWL_MAX_SCAN_SIZE;

        /* De-alloc array of command/tx buffers */
        for (i = 0; i <= TFD_CMD_SLOTS; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd)
                pci_free_consistent(dev, priv->hw_params.tfd_size *
                                    txq->q.n_bd, txq->tfds, txq->q.dma_addr);

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
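
/*
 * The wrap helpers iwl_queue_inc_wrap()/iwl_queue_dec_wrap() used
 * throughout this file live in iwl-helpers.h, not here. A minimal
 * sketch of the increment side, assuming n_bd is a power of two
 * (enforced by iwl_queue_init() below):
 *
 *      static inline int iwl_queue_inc_wrap(int index, int n_bd)
 *      {
 *              return ++index & (n_bd - 1);
 *      }
 */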
int iwl_queue_space(const struct iwl_queue *q)
{
        int s = q->read_ptr - q->write_ptr;

        if (q->read_ptr > q->write_ptr)
                s -= q->n_bd;

        if (s <= 0)
                s += q->n_window;
        /* keep some reserve to not confuse empty and full situations */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
EXPORT_SYMBOL(iwl_queue_space);
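
/*
 * Worked example for iwl_queue_space(): on an empty queue
 * (read_ptr == write_ptr), s starts at 0, the n_window size is added,
 * and the 2-entry reserve is subtracted, leaving n_window - 2 usable
 * slots -- the "minimum of 2 empty entries" rule from the
 * theory-of-operation note above.
 */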
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
                          int count, int slots_num, u32 id)
{
        q->n_bd = count;
        q->n_window = slots_num;
        q->id = id;

        /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
         * and iwl_queue_dec_wrap are broken. */
        BUG_ON(!is_power_of_2(count));

        /* slots_num must be power-of-two size, otherwise
         * get_cmd_index is broken. */
        BUG_ON(!is_power_of_2(slots_num));

        q->low_mark = q->n_window / 4;
        if (q->low_mark < 4)
                q->low_mark = 4;

        q->high_mark = q->n_window / 8;
        if (q->high_mark < 2)
                q->high_mark = 2;

        q->write_ptr = q->read_ptr = 0;

        return 0;
}
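
/*
 * For concreteness, assuming the slot counts from iwl-dev.h
 * (TFD_TX_CMD_SLOTS = 256 for data queues, TFD_CMD_SLOTS = 32 for the
 * command queue, as used by iwl_txq_ctx_reset() below): a data queue
 * gets low_mark = 64 and high_mark = 32, the command queue gets
 * low_mark = 8 and high_mark = 4.
 */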
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
                              struct iwl_tx_queue *txq, u32 id)
{
        struct pci_dev *dev = priv->pci_dev;
        size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (id != IWL_CMD_QUEUE_NUM) {
                txq->txb = kmalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERR(priv, "kmalloc for auxiliary BD "
                                "structures failed\n");
                        goto error;
                }
        } else {
                txq->txb = NULL;
        }

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
        if (!txq->tfds) {
                IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
                goto error;
        }
        txq->q.id = id;

        return 0;

 error:
        kfree(txq->txb);
        txq->txb = NULL;

        return -ENOMEM;
}
/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                      int slots_num, u32 txq_id)
{
        int i, len;
        int ret;

        /*
         * Alloc buffer array for commands (Tx or other types of commands).
         * For the command queue (#4), allocate command space + one big
         * command for scan, since the scan command is very large; the system
         * will never have more than one scan in flight at a time, so only
         * one such buffer is needed.
         * For normal Tx queues (all other queues), no super-size command
         * space is needed.
         */
        len = sizeof(struct iwl_cmd);
        for (i = 0; i <= slots_num; i++) {
                if (i == slots_num) {
                        if (txq_id == IWL_CMD_QUEUE_NUM)
                                len += IWL_MAX_SCAN_SIZE;
                        else
                                continue;
                }

                txq->cmd[i] = kmalloc(len, GFP_KERNEL);
                if (!txq->cmd[i])
                        goto err;
        }

        /* Alloc driver data array and TFD circular buffer */
        ret = iwl_tx_queue_alloc(priv, txq, txq_id);
        if (ret)
                goto err;

        txq->need_update = 0;

        /* aggregation TX queues will get their ID when aggregation begins */
        if (txq_id <= IWL_TX_FIFO_AC3)
                txq->swq_id = txq_id;

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

        /* Tell device where to find queue */
        priv->cfg->ops->lib->txq_init(priv, txq);

        return 0;
err:
        for (i = 0; i < slots_num; i++) {
                kfree(txq->cmd[i]);
                txq->cmd[i] = NULL;
        }

        if (txq_id == IWL_CMD_QUEUE_NUM) {
                kfree(txq->cmd[slots_num]);
                txq->cmd[slots_num] = NULL;
        }
        return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);
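
/*
 * The extra slot allocated above (index slots_num, command queue only)
 * is the "huge" buffer that holds the oversized scan command. When a
 * command carries CMD_SIZE_HUGE, get_cmd_index() picks that last slot
 * instead of hashing on the write pointer; a sketch, assuming the
 * helper's definition in iwl-dev.h:
 *
 *      return is_huge ? q->n_window : index & (q->n_window - 1);
 */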
/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
        int txq_id;

        /* Tx queues */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
                if (txq_id == IWL_CMD_QUEUE_NUM)
                        iwl_cmd_queue_free(priv);
                else
                        iwl_tx_queue_free(priv, txq_id);

        iwl_free_dma_ptr(priv, &priv->kw);

        iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
        int ret = 0;
        int txq_id, slots_num;
        unsigned long flags;

        /* Free all tx/cmd queues and keep-warm buffer */
        iwl_hw_txq_ctx_free(priv);

        ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
                                priv->hw_params.scd_bc_tbls_size);
        if (ret) {
                IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
                goto error_bc_tbls;
        }
        /* Alloc keep-warm buffer */
        ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
        if (ret) {
                IWL_ERR(priv, "Keep Warm allocation failed\n");
                goto error_kw;
        }
        spin_lock_irqsave(&priv->lock, flags);

        /* Turn off all Tx DMA fifos */
        priv->cfg->ops->lib->txq_set_sched(priv, 0);

        /* Tell NIC where to find the "keep warm" buffer */
        iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

        spin_unlock_irqrestore(&priv->lock, flags);

        /* Alloc and init all Tx queues, including the command queue (#4) */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
                slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
                            TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
                                        txq_id);
                if (ret) {
                        IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
                        goto error;
                }
        }

        return ret;

 error:
        iwl_hw_txq_ctx_free(priv);
        iwl_free_dma_ptr(priv, &priv->kw);
 error_kw:
        iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
        return ret;
}
/**
 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 */
void iwl_txq_ctx_stop(struct iwl_priv *priv)
{
        int ch;
        unsigned long flags;

        /* Turn off all Tx DMA fifos */
        spin_lock_irqsave(&priv->lock, flags);
        priv->cfg->ops->lib->txq_set_sched(priv, 0);

        /* Stop each Tx DMA channel, and wait for it to be idle */
        for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
                iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
                iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
                                    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
                                    1000);
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        /* Deallocate memory for all Tx queues */
        iwl_hw_txq_ctx_free(priv);
}
EXPORT_SYMBOL(iwl_txq_ctx_stop);
/*
 * handle building the REPLY_TX command notification.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
                                   struct iwl_tx_cmd *tx_cmd,
                                   struct ieee80211_tx_info *info,
                                   struct ieee80211_hdr *hdr,
                                   u8 std_id)
{
        __le16 fc = hdr->frame_control;
        __le32 tx_flags = tx_cmd->tx_flags;

        tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
                tx_flags |= TX_CMD_FLG_ACK_MSK;
                if (ieee80211_is_mgmt(fc))
                        tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
                if (ieee80211_is_probe_resp(fc) &&
                    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
                        tx_flags |= TX_CMD_FLG_TSF_MSK;
        } else {
                tx_flags &= (~TX_CMD_FLG_ACK_MSK);
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }

        if (ieee80211_is_back_req(fc))
                tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

        tx_cmd->sta_id = std_id;
        if (ieee80211_has_morefrags(fc))
                tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

        if (ieee80211_is_data_qos(fc)) {
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        } else {
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }

        priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);

        if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
                tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

        tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
        if (ieee80211_is_mgmt(fc)) {
                if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
                else
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
        } else {
                tx_cmd->timeout.pm_frame_timeout = 0;
        }

        tx_cmd->driver_txop = 0;
        tx_cmd->tx_flags = tx_flags;
        tx_cmd->next_frame_len = 0;
}
#define RTS_HCCA_RETRY_LIMIT            3
#define RTS_DEFAULT_RETRY_LIMIT         60

static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
                                  struct iwl_tx_cmd *tx_cmd,
                                  struct ieee80211_tx_info *info,
                                  __le16 fc, int sta_id,
                                  int is_hcca)
{
        u32 rate_flags = 0;
        int rate_idx;
        u8 rts_retry_limit = 0;
        u8 data_retry_limit = 0;
        u8 rate_plcp;

        rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
                       IWL_RATE_COUNT - 1);

        rate_plcp = iwl_rates[rate_idx].plcp;

        rts_retry_limit = (is_hcca) ?
            RTS_HCCA_RETRY_LIMIT : RTS_DEFAULT_RETRY_LIMIT;

        if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
                rate_flags |= RATE_MCS_CCK_MSK;

        if (ieee80211_is_probe_resp(fc)) {
                data_retry_limit = 3;
                if (data_retry_limit < rts_retry_limit)
                        rts_retry_limit = data_retry_limit;
        } else
                data_retry_limit = IWL_DEFAULT_TX_RETRY;

        if (priv->data_retry_limit != -1)
                data_retry_limit = priv->data_retry_limit;

        if (ieee80211_is_data(fc)) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
        } else {
                switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
                case cpu_to_le16(IEEE80211_STYPE_AUTH):
                case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
                case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
                case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
                        if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
                                tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
                                tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
                        }
                        break;
                default:
                        break;
                }

                priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
                rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
        }

        tx_cmd->rts_retry_limit = rts_retry_limit;
        tx_cmd->data_retry_limit = data_retry_limit;
        tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
                                      struct ieee80211_tx_info *info,
                                      struct iwl_tx_cmd *tx_cmd,
                                      struct sk_buff *skb_frag,
                                      int sta_id)
{
        struct ieee80211_key_conf *keyconf = info->control.hw_key;

        switch (keyconf->alg) {
        case ALG_CCMP:
                tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
                memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
                if (info->flags & IEEE80211_TX_CTL_AMPDU)
                        tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
                IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
                break;

        case ALG_TKIP:
                tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
                ieee80211_get_tkip_key(keyconf, skb_frag,
                                       IEEE80211_TKIP_P2_KEY, tx_cmd->key);
                IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
                break;

        case ALG_WEP:
                tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
                        (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

                if (keyconf->keylen == WEP_KEY_LEN_128)
                        tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;

                memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

                IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
                             "with key %d\n", keyconf->keyidx);
                break;

        default:
                IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
                break;
        }
}
static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
{
        /* 0 - mgmt, 1 - cnt, 2 - data */
        int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
        priv->tx_stats[idx].cnt++;
        priv->tx_stats[idx].bytes += len;
}
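
/*
 * For reference: IEEE80211_FCTL_FTYPE is 0x000c, so the shift above maps
 * management frames (type 0x0) to index 0, control frames (0x4) to 1 and
 * data frames (0x8) to 2 -- the "mgmt/cnt/data" buckets of tx_stats.
 */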
/*
 * start REPLY_TX command process
 */
int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_tx_queue *txq;
        struct iwl_queue *q;
        struct iwl_cmd *out_cmd;
        struct iwl_tx_cmd *tx_cmd;
        int swq_id, txq_id;
        dma_addr_t phys_addr;
        dma_addr_t txcmd_phys;
        dma_addr_t scratch_phys;
        u16 len, len_org;
        u16 seq_number = 0;
        __le16 fc;
        u8 hdr_len;
        u8 sta_id;
        u8 wait_write_ptr = 0;
        u8 tid = 0;
        u8 *qc = NULL;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&priv->lock, flags);
        if (iwl_is_rfkill(priv)) {
                IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
                goto drop_unlock;
        }

        if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
             IWL_INVALID_RATE) {
                IWL_ERR(priv, "ERROR: No TX rate available.\n");
                goto drop_unlock;
        }

        fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
        if (ieee80211_is_auth(fc))
                IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
        else if (ieee80211_is_assoc_req(fc))
                IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
        else if (ieee80211_is_reassoc_req(fc))
                IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

        /* drop all data frames if we are not associated */
        if (ieee80211_is_data(fc) &&
            (!iwl_is_monitor_mode(priv) ||
            !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
            (!iwl_is_associated(priv) ||
             ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
             !priv->assoc_station_added)) {
                IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
                goto drop_unlock;
        }

        hdr_len = ieee80211_hdrlen(fc);

        /* Find (or create) index into station table for destination station */
        sta_id = iwl_get_sta_id(priv, hdr);
        if (sta_id == IWL_INVALID_STATION) {
                IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
                               hdr->addr1);
                goto drop_unlock;
        }

        IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

        txq_id = skb_get_queue_mapping(skb);
        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);
                tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
                seq_number = priv->stations[sta_id].tid[tid].seq_number;
                seq_number &= IEEE80211_SCTL_SEQ;
                hdr->seq_ctrl = hdr->seq_ctrl &
                                cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(seq_number);
                seq_number += 0x10;
                /* aggregation is on for this <sta,tid> */
                if (info->flags & IEEE80211_TX_CTL_AMPDU)
                        txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
        }

        txq = &priv->txq[txq_id];
        swq_id = txq->swq_id;
        q = &txq->q;

        if (unlikely(iwl_queue_space(q) < q->high_mark))
                goto drop_unlock;

        if (ieee80211_is_data_qos(fc))
                priv->stations[sta_id].tid[tid].tfds_in_queue++;

        /* Set up driver data for this TFD */
        memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
        txq->txb[q->write_ptr].skb[0] = skb;

        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_cmd = txq->cmd[q->write_ptr];
        tx_cmd = &out_cmd->cmd.tx;
        memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
        memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

        /*
         * Set up the Tx-command (not MAC!) header.
         * Store the chosen Tx queue and TFD index within the sequence field;
         * after Tx, uCode's Tx response will return this value so driver can
         * locate the frame within the tx queue and do post-tx processing.
         */
        out_cmd->hdr.cmd = REPLY_TX;
        out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                                INDEX_TO_SEQ(q->write_ptr)));
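
        /*
         * Sequence-field packing, for reference: assuming the
         * QUEUE_TO_SEQ()/INDEX_TO_SEQ() macros from iwl-commands.h, the
         * queue id lands in bits 8-12 and the TFD index in the low byte,
         * so SEQ_TO_QUEUE()/SEQ_TO_INDEX() in iwl_tx_cmd_complete() can
         * recover both from the response.
         */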

        /* Copy MAC header from skb into command buffer */
        memcpy(tx_cmd->hdr, hdr, hdr_len);

        /* Total # bytes to be transmitted */
        len = (u16)skb->len;
        tx_cmd->len = cpu_to_le16(len);

        if (info->control.hw_key)
                iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

        /* TODO need this for burst mode later on */
        iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);

        /* set is_hcca to 0; it probably will never be implemented */
        iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);

        iwl_update_tx_stats(priv, le16_to_cpu(fc), len);

        /*
         * Use the first empty entry in this queue's command buffer array
         * to contain the Tx command and MAC header concatenated together
         * (payload data will be in another buffer).
         * Size of this varies, due to varying MAC header length.
         * If end is not dword aligned, we'll have 2 extra bytes at the end
         * of the MAC header (device reads on dword boundaries).
         * We'll tell device about this padding later.
         */
        len = sizeof(struct iwl_tx_cmd) +
                sizeof(struct iwl_cmd_header) + hdr_len;

        len_org = len;
        len = (len + 3) & ~3;

        if (len_org != len)
                len_org = 1;
        else
                len_org = 0;

        /* Tell NIC about any 2-byte padding after MAC header */
        if (len_org)
                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
        txcmd_phys = pci_map_single(priv->pci_dev,
                                    &out_cmd->hdr, len,
                                    PCI_DMA_BIDIRECTIONAL);
        pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
        pci_unmap_len_set(&out_cmd->meta, len, len);
        /* Add buffer containing Tx command and MAC(!) header to TFD's
         * first entry */
        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                   txcmd_phys, len, 1, 0);

        if (!ieee80211_has_morefrags(hdr->frame_control)) {
                txq->need_update = 1;
                if (qc)
                        priv->stations[sta_id].tid[tid].seq_number = seq_number;
        } else {
                wait_write_ptr = 1;
                txq->need_update = 0;
        }

        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
        len = skb->len - hdr_len;
        if (len) {
                phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
                                           len, PCI_DMA_TODEVICE);
                priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                           phys_addr, len,
                                                           0, 0);
        }

        scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
                                offsetof(struct iwl_tx_cmd, scratch);

        len = sizeof(struct iwl_tx_cmd) +
                sizeof(struct iwl_cmd_header) + hdr_len;
        /* take back ownership of DMA buffer to enable update */
        pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
                                    len, PCI_DMA_BIDIRECTIONAL);
        tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
        tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

        IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
                     le16_to_cpu(out_cmd->hdr.sequence));
        IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
        iwl_print_hex_dump(IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
        iwl_print_hex_dump(IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

        /* Set up entry for this TFD in Tx byte-count array */
        if (info->flags & IEEE80211_TX_CTL_AMPDU)
                priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
                                                     le16_to_cpu(tx_cmd->len));

        pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
                                       len, PCI_DMA_BIDIRECTIONAL);

        /* Tell device the write index *just past* this latest filled TFD */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        ret = iwl_txq_update_write_ptr(priv, txq);
        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret)
                return ret;

        if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
                if (wait_write_ptr) {
                        spin_lock_irqsave(&priv->lock, flags);
                        txq->need_update = 1;
                        iwl_txq_update_write_ptr(priv, txq);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else {
                        iwl_stop_queue(priv, txq->swq_id);
                }
        }

        return 0;

drop_unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
        return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
        struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
        struct iwl_queue *q = &txq->q;
        struct iwl_cmd *out_cmd;
        dma_addr_t phys_addr;
        unsigned long flags;
        int len, ret;
        u32 idx;
        u16 fix_size;

        cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
        fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

        /* If any of the command structures end up being larger than
         * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command,
         * then we will need to increase the size of the TFD entries */
        BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
               !(cmd->meta.flags & CMD_SIZE_HUGE));

        if (iwl_is_rfkill(priv)) {
                IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
                return -EIO;
        }

        if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
                IWL_ERR(priv, "No space for Tx\n");
                return -ENOSPC;
        }

        spin_lock_irqsave(&priv->hcmd_lock, flags);

        idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
        out_cmd = txq->cmd[idx];

        out_cmd->hdr.cmd = cmd->id;
        memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
        memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

        /* At this point, the out_cmd now has all of the incoming cmd
         * information */

        out_cmd->hdr.flags = 0;
        out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
                        INDEX_TO_SEQ(q->write_ptr));
        if (out_cmd->meta.flags & CMD_SIZE_HUGE)
                out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
        len = sizeof(struct iwl_cmd) - sizeof(struct iwl_cmd_meta);
        len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;

#ifdef CONFIG_IWLWIFI_DEBUG
        switch (out_cmd->hdr.cmd) {
        case REPLY_TX_LINK_QUALITY_CMD:
        case SENSITIVITY_CMD:
                IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
                                  "%d bytes at %d[%d]:%d\n",
                                  get_cmd_string(out_cmd->hdr.cmd),
                                  out_cmd->hdr.cmd,
                                  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
                                  q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
                break;
        default:
                IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
                             "%d bytes at %d[%d]:%d\n",
                             get_cmd_string(out_cmd->hdr.cmd),
                             out_cmd->hdr.cmd,
                             le16_to_cpu(out_cmd->hdr.sequence), fix_size,
                             q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
        }
#endif
        txq->need_update = 1;

        if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
                /* Set up entry in queue's byte count circular buffer */
                priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

        phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
                                   fix_size, PCI_DMA_BIDIRECTIONAL);
        pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
        pci_unmap_len_set(&out_cmd->meta, len, fix_size);

        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                   phys_addr, fix_size, 1,
                                                   U32_PAD(cmd->len));

        /* Increment and update queue's write index */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        ret = iwl_txq_update_write_ptr(priv, txq);

        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
        return ret ? ret : idx;
}
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_tx_info *tx_info;
        int nfreed = 0;

        if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
                        "is out of range [0-%d] %d %d.\n", txq_id,
                        index, q->n_bd, q->write_ptr, q->read_ptr);
                return 0;
        }

        for (index = iwl_queue_inc_wrap(index, q->n_bd);
             q->read_ptr != index;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                tx_info = &txq->txb[txq->q.read_ptr];
                ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
                tx_info->skb[0] = NULL;

                if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
                        priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);

                priv->cfg->ops->lib->txq_free_tfd(priv, txq);
                nfreed++;
        }
        return nfreed;
}
EXPORT_SYMBOL(iwl_tx_queue_reclaim);
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
                                   int idx, int cmd_idx)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        int nfreed = 0;

        if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
                        "is out of range [0-%d] %d %d.\n", txq_id,
                        idx, q->n_bd, q->write_ptr, q->read_ptr);
                return;
        }

        pci_unmap_single(priv->pci_dev,
                         pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
                         pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
                         PCI_DMA_BIDIRECTIONAL);

        for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                if (nfreed++ > 0) {
                        IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
                                q->write_ptr, q->read_ptr);
                        queue_work(priv->workqueue, &priv->restart);
                }
        }
}
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        int txq_id = SEQ_TO_QUEUE(sequence);
        int index = SEQ_TO_INDEX(sequence);
        int cmd_index;
        bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
        struct iwl_cmd *cmd;

        /* If a Tx command is being handled and it isn't in the actual
         * command queue then a command routing bug has been introduced
         * in the queue management code. */
        if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
                 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
                 txq_id, sequence,
                 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
                 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
                iwl_print_hex_error(priv, rxb, 32);
                return;
        }

        cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
        cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];

        /* Input error checking is done when commands are added to queue. */
        if (cmd->meta.flags & CMD_WANT_SKB) {
                cmd->meta.source->u.skb = rxb->skb;
                rxb->skb = NULL;
        } else if (cmd->meta.u.callback &&
                   !cmd->meta.u.callback(priv, cmd, rxb->skb))
                rxb->skb = NULL;

        iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

        if (!(cmd->meta.flags & CMD_ASYNC)) {
                clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
                wake_up_interruptible(&priv->wait_command_queue);
        }
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
{
        int txq_id;

        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
                if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
                        return txq_id;
        return -1;
}
int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
{
        int sta_id;
        int tx_fifo;
        int txq_id;
        int ret;
        unsigned long flags;
        struct iwl_tid_data *tid_data;

        if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
                tx_fifo = default_tid_to_tx_fifo[tid];
        else
                return -EINVAL;

        IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
                 __func__, ra, tid);

        sta_id = iwl_find_station(priv, ra);
        if (sta_id == IWL_INVALID_STATION) {
                IWL_ERR(priv, "Start AGG on invalid station\n");
                return -ENXIO;
        }
        if (unlikely(tid >= MAX_TID_COUNT))
                return -EINVAL;

        if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
                IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
                return -ENXIO;
        }

        txq_id = iwl_txq_ctx_activate_free(priv);
        if (txq_id == -1) {
                IWL_ERR(priv, "No free aggregation queue available\n");
                return -ENXIO;
        }

        spin_lock_irqsave(&priv->sta_lock, flags);
        tid_data = &priv->stations[sta_id].tid[tid];
        *ssn = SEQ_TO_SN(tid_data->seq_number);
        tid_data->agg.txq_id = txq_id;
        priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
        spin_unlock_irqrestore(&priv->sta_lock, flags);

        ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
                                                  sta_id, tid, *ssn);
        if (ret)
                return ret;

        if (tid_data->tfds_in_queue == 0) {
                IWL_DEBUG_HT(priv, "HW queue is empty\n");
                tid_data->agg.state = IWL_AGG_ON;
                ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
        } else {
                IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
                             tid_data->tfds_in_queue);
                tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
        }
        return ret;
}
EXPORT_SYMBOL(iwl_tx_agg_start);
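
/*
 * Aggregation state machine, as implemented by iwl_tx_agg_start(),
 * iwl_tx_agg_stop() and iwl_txq_check_empty() below: a session moves
 * from IWL_AGG_OFF through IWL_EMPTYING_HW_QUEUE_ADDBA (while frames
 * queued before the ADDBA drain) to IWL_AGG_ON, and back through
 * IWL_EMPTYING_HW_QUEUE_DELBA to IWL_AGG_OFF when it is torn down.
 */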
int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
{
        int tx_fifo_id, txq_id, sta_id, ssn = -1;
        struct iwl_tid_data *tid_data;
        int ret, write_ptr, read_ptr;
        unsigned long flags;

        if (!ra) {
                IWL_ERR(priv, "ra = NULL\n");
                return -EINVAL;
        }

        if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
                tx_fifo_id = default_tid_to_tx_fifo[tid];
        else
                return -EINVAL;

        sta_id = iwl_find_station(priv, ra);

        if (sta_id == IWL_INVALID_STATION) {
                IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
                return -ENXIO;
        }

        if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
                IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");

        tid_data = &priv->stations[sta_id].tid[tid];
        ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
        txq_id = tid_data->agg.txq_id;
        write_ptr = priv->txq[txq_id].q.write_ptr;
        read_ptr = priv->txq[txq_id].q.read_ptr;

        /* The queue is not empty */
        if (write_ptr != read_ptr) {
                IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
                priv->stations[sta_id].tid[tid].agg.state =
                                IWL_EMPTYING_HW_QUEUE_DELBA;
                return 0;
        }

        IWL_DEBUG_HT(priv, "HW queue is empty\n");
        priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

        spin_lock_irqsave(&priv->lock, flags);
        ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
                                                   tx_fifo_id);
        spin_unlock_irqrestore(&priv->lock, flags);

        if (ret)
                return ret;

        ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);

        return 0;
}
EXPORT_SYMBOL(iwl_tx_agg_stop);
int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
{
        struct iwl_queue *q = &priv->txq[txq_id].q;
        u8 *addr = priv->stations[sta_id].sta.sta.addr;
        struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];

        switch (priv->stations[sta_id].tid[tid].agg.state) {
        case IWL_EMPTYING_HW_QUEUE_DELBA:
                /* We are reclaiming the last packet of the
                 * aggregated HW queue */
                if ((txq_id == tid_data->agg.txq_id) &&
                    (q->read_ptr == q->write_ptr)) {
                        u16 ssn = SEQ_TO_SN(tid_data->seq_number);
                        int tx_fifo = default_tid_to_tx_fifo[tid];
                        IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
                        priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
                                                             ssn, tx_fifo);
                        tid_data->agg.state = IWL_AGG_OFF;
                        ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
                }
                break;
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
                /* We are reclaiming the last packet of the queue */
                if (tid_data->tfds_in_queue == 0) {
                        IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
                        tid_data->agg.state = IWL_AGG_ON;
                        ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
                }
                break;
        }
        return 0;
}
EXPORT_SYMBOL(iwl_txq_check_empty);
/**
 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
                                             struct iwl_ht_agg *agg,
                                             struct iwl_compressed_ba_resp *ba_resp)
{
        int i, sh, ack;
        u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
        u64 bitmap;
        int successes = 0;
        struct ieee80211_tx_info *info;

        if (unlikely(!agg->wait_for_ba)) {
                IWL_ERR(priv, "Received BA when not expected\n");
                return -EINVAL;
        }

        /* Mark that the expected block-ack response arrived */
        agg->wait_for_ba = 0;
        IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);

        /* Calculate shift to align block-ack bits with our Tx window bits */
        sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
        if (sh < 0) /* something is wrong with indices */
                sh += 0x100;

        /* don't use 64-bit values for now */
        bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

        if (agg->frame_count > (64 - sh)) {
                IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
                return -1;
        }

        /* check for success or failure according to the
         * transmitted bitmap and block-ack bitmap */
        bitmap &= agg->bitmap;

        /* For each frame attempted in aggregation,
         * update driver's record of tx frame's status. */
        for (i = 0; i < agg->frame_count; i++) {
                ack = bitmap & (1ULL << i);
                successes += !!ack;
                IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
                                   ack ? "ACK" : "NACK", i,
                                   (agg->start_idx + i) & 0xff,
                                   agg->start_idx + i);
        }

        info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
        memset(&info->status, 0, sizeof(info->status));
        info->flags = IEEE80211_TX_STAT_ACK;
        info->flags |= IEEE80211_TX_STAT_AMPDU;
        info->status.ampdu_ack_map = successes;
        info->status.ampdu_ack_len = agg->frame_count;
        iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

        IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);

        return 0;
}
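
/*
 * Shift example for the alignment above: assuming the driver's Tx window
 * starts at index 5 (agg->start_idx == 5) while the BA's starting
 * sequence maps to index 3, then sh == 2 and the hardware's ACK bitmap
 * is shifted right by two so that bit 0 lines up with the first frame
 * the driver recorded in agg->bitmap.
 */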
/**
 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
                                struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
        struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
        struct iwl_tx_queue *txq = NULL;
        struct iwl_ht_agg *agg;
        int index;
        int sta_id;
        int tid;

        /* "flow" corresponds to Tx queue */
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

        /* "ssn" is start of block-ack Tx window, corresponds to index
         * (in Tx queue's circular buffer) of first TFD/frame in window */
        u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

        if (scd_flow >= priv->hw_params.max_txq_num) {
                IWL_ERR(priv,
                        "BUG_ON scd_flow is bigger than number of queues\n");
                return;
        }

        txq = &priv->txq[scd_flow];
        sta_id = ba_resp->sta_id;
        tid = ba_resp->tid;
        agg = &priv->stations[sta_id].tid[tid].agg;

        /* Find index just before block-ack window */
        index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

        /* TODO: Need to get this copy more safely - now good for debug */

        IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
                           "sta_id = %d\n",
                           agg->wait_for_ba,
                           (u8 *) &ba_resp->sta_addr_lo32,
                           ba_resp->sta_id);
        IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
                           "%d, scd_ssn = %d\n",
                           ba_resp->tid,
                           ba_resp->seq_ctl,
                           (unsigned long long)le64_to_cpu(ba_resp->bitmap),
                           ba_resp->scd_flow,
                           ba_resp->scd_ssn);
        IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
                           agg->start_idx,
                           (unsigned long long)agg->bitmap);

        /* Update driver's record of ACK vs. not for each frame in window */
        iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);

        /* Release all TFDs before the SSN, i.e. all TFDs in front of
         * block-ack window (we assume that they've been successfully
         * transmitted ... if not, it's too late anyway). */
        if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
                /* calculate mac80211 ampdu sw queue to wake */
                int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
                priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;

                if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
                    priv->mac80211_registered &&
                    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
                        iwl_wake_queue(priv, txq->swq_id);

                iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
        }
}
EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
        switch (status & TX_STATUS_MSK) {
        case TX_STATUS_SUCCESS:
                return "SUCCESS";
                TX_STATUS_ENTRY(SHORT_LIMIT);
                TX_STATUS_ENTRY(LONG_LIMIT);
                TX_STATUS_ENTRY(FIFO_UNDERRUN);
                TX_STATUS_ENTRY(MGMNT_ABORT);
                TX_STATUS_ENTRY(NEXT_FRAG);
                TX_STATUS_ENTRY(LIFE_EXPIRE);
                TX_STATUS_ENTRY(DEST_PS);
                TX_STATUS_ENTRY(ABORTED);
                TX_STATUS_ENTRY(BT_RETRY);
                TX_STATUS_ENTRY(STA_INVALID);
                TX_STATUS_ENTRY(FRAG_DROPPED);
                TX_STATUS_ENTRY(TID_DISABLE);
                TX_STATUS_ENTRY(FRAME_FLUSHED);
                TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
                TX_STATUS_ENTRY(TX_LOCKED);
                TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
        }

        return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */