iwl-tx.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(priv,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(priv, HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(priv, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}

	txq->need_update = 0;
}
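
/*
 * TFD buffer-descriptor (TB) accessors.  Each TB inside a TFD stores a
 * 36-bit DMA address split across the 32-bit "lo" word and the low 4 bits
 * of "hi_n_len"; the upper 12 bits of "hi_n_len" hold the buffer length.
 */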
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	dma_addr_t addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
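
/*
 * iwlagn_unmap_tfd - Unmap all DMA buffers referenced by one TFD: first the
 * command mapping recorded in @meta, then any additional data chunks
 * described by the remaining TBs.
 */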
static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd)
{
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, this is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv: driver private data
 * @txq: tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;
	int index = txq->q.read_ptr;

	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}
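
/*
 * iwlagn_txq_attach_buf_to_tfd - Add a DMA buffer to the TFD at the queue's
 * write index.  If @reset is set the TFD is zeroed first, so the buffer
 * becomes its first TB; otherwise the buffer is appended after the
 * existing TBs.
 */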
int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * a given Tx queue, and enable the DMA channel used for that queue.
 *
 * Supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (q->n_bd == 0)
		return;

	while (q->write_ptr != q->read_ptr) {
		iwlagn_txq_free_tfd(priv, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = &priv->pci_dev->dev;
	int i;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
 */
void iwl_cmd_queue_unmap(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	int i;

	if (q->n_bd == 0)
		return;

	while (q->read_ptr != q->write_ptr) {
		i = get_cmd_index(q, q->read_ptr);

		if (txq->meta[i].flags & CMD_MAPPED) {
			pci_unmap_single(priv->pci_dev,
					 dma_unmap_addr(&txq->meta[i], mapping),
					 dma_unmap_len(&txq->meta[i], len),
					 PCI_DMA_BIDIRECTIONAL);
			txq->meta[i].flags = 0;
		}

		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct device *dev = &priv->pci_dev->dev;
	int i;

	iwl_cmd_queue_unmap(priv);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on 'tx done IRQ'), if free space becomes > high
 * mark, the Tx queue is resumed.
 *
 ***************************************************/
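
/**
 * iwl_queue_space - Return the number of free slots in the queue, keeping a
 * two-entry reserve so that a full queue is never mistaken for an empty one.
 */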
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct device *dev = &priv->pci_dev->dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != priv->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
	if (ret)
		return ret;

	/* Tell device where to find queue */
	iwlagn_tx_queue_init(priv, txq);

	return 0;
err:
	for (i = 0; i < slots_num; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
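
/*
 * iwl_tx_queue_reset - Re-initialize an already-allocated tx/cmd queue:
 * clear the per-slot command metadata, reset the read/write indexes and
 * water marks, and point the device at the (unchanged) TFD circular buffer.
 * No memory is allocated or freed here.
 */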
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			int slots_num, u32 txq_id)
{
	int actual_slots = slots_num;

	if (txq_id == priv->cmd_queue)
		actual_slots++;

	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);

	txq->need_update = 0;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwlagn_tx_queue_init(priv, txq);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_WARN(priv, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

		IWL_ERR(priv, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv);
		if (!is_ct_kill) {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv, false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
		return -ENOSPC;
	}

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */
	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
					    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */
	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, priv->cmd_queue);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   copy_size, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
					   cmd->len[i], PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
			iwlagn_unmap_tfd(priv, out_meta,
					 &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags | CMD_MAPPED;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

 out:
	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv, false);
		}
	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != priv->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, priv->cmd_queue, sequence,
		  priv->txq[priv->cmd_queue].q.read_ptr,
		  priv->txq[priv->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}

	/* Mark as unmapped */
	meta->flags = 0;

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
}