iwl-tx.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(priv,
					"Tx queue %d requesting wakeup, GP1 = 0x%x\n",
					txq_id, reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(priv, HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(priv, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}

	txq->need_update = 0;
}
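
/*
 * Editor's note (a sketch, not driver code): HBUS_TARG_WRPTR takes the
 * queue's write index in bits 0-7 and the queue number in bits 8 and up,
 * hence the "write_ptr | (txq_id << 8)" above.  For example, advancing
 * queue 4 to index 7 writes 0x0407:
 *
 *	iwl_write32(priv, HBUS_TARG_WRPTR, 7 | (4 << 8));
 */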
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	dma_addr_t addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		    ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
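
/*
 * Editor's note (a sketch of the packing, derived from the helpers above):
 * each Tx buffer descriptor packs a 36-bit DMA address and a 12-bit length
 * into tb->lo (low 32 address bits) and the 16-bit tb->hi_n_len field
 * (bits 0-3: address bits 35:32, bits 4-15: length).  For a hypothetical
 * addr = 0x912345678 and len = 0x40:
 *
 *	put_unaligned_le32(0x12345678, &tb->lo);	// addr[31:0]
 *	tb->hi_n_len = cpu_to_le16((0x40 << 4) | 0x9);	// len | addr[35:32]
 *
 * iwl_tfd_tb_get_addr() and iwl_tfd_tb_get_len() invert this packing.
 */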
static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd)
{
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i),
				 PCI_DMA_TODEVICE);
}
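
/*
 * Editor's note: the first TB always covers the (Tx) command buffer, whose
 * mapping is recorded in the meta structure and created bidirectionally by
 * the callers (see iwl_enqueue_hcmd() below); the remaining chunks are
 * mapped PCI_DMA_TODEVICE at attach time, so the unmap directions above
 * mirror that.
 */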
/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv: driver private data
 * @txq: tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;
	int index = txq->q.read_ptr;

	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}
int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error: cannot send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
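
/*
 * Editor's sketch (usage, not driver code): callers attach the command
 * buffer first with reset set, then any extra chunks with reset clear,
 * as iwl_enqueue_hcmd() does further down:
 *
 *	iwlagn_txq_attach_buf_to_tfd(priv, txq, hdr_phys, copy_size, 1);
 *	iwlagn_txq_attach_buf_to_tfd(priv, txq, chunk_phys, chunk_len, 0);
 *
 * hdr_phys, chunk_phys and chunk_len are hypothetical names for this
 * sketch.
 */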
/*
 * Tell NIC where to find circular buffer of Tx Frame Descriptors for
 * a given Tx queue, and enable the DMA channel used for that queue.
 *
 * Supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
/**
 * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
 */
void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (q->n_bd == 0)
		return;

	while (q->write_ptr != q->read_ptr) {
		iwlagn_txq_free_tfd(priv, txq);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}
/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = &priv->pci_dev->dev;
	int i;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/**
 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
 */
void iwl_cmd_queue_unmap(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	int i;

	if (q->n_bd == 0)
		return;

	while (q->read_ptr != q->write_ptr) {
		i = get_cmd_index(q, q->read_ptr);

		if (txq->meta[i].flags & CMD_MAPPED) {
			iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i]);
			txq->meta[i].flags = 0;
		}

		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}
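
/*
 * Editor's note: CMD_MAPPED is set by iwl_enqueue_hcmd() once a command's
 * DMA mappings exist, and cleared here and in iwl_tx_cmd_complete() after
 * unmapping; checking it above avoids unmapping slots that were never
 * mapped (or were already reclaimed).
 */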
/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct device *dev = &priv->pci_dev->dev;
	int i;

	iwl_cmd_queue_unmap(priv);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
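
/*
 * Editor's note (a worked example, hypothetical numbers): with n_bd = 256,
 * n_window = 64, read_ptr = 10 and write_ptr = 14, s starts at -4, adding
 * the window size brings it to 60, and the 2-entry reserve described in
 * the theory-of-operation block above leaves 58 free slots.
 */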
/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
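
/*
 * Editor's sketch: the power-of-two requirements above exist because the
 * wrap helpers mask rather than divide.  From memory of iwl-helpers.h
 * (an assumption, verify against that header):
 *
 *	static inline int iwl_queue_inc_wrap(int index, int n_bd)
 *	{
 *		return ++index & (n_bd - 1);
 *	}
 *
 * With a non-power-of-two n_bd the mask would not wrap at the queue end.
 */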
/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct device *dev = &priv->pci_dev->dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != priv->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kzalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}
/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX,
			     slots_num, txq_id);
	if (ret)
		return ret;

	/* Tell device where to find queue */
	iwlagn_tx_queue_init(priv, txq);

	return 0;
err:
	for (i = 0; i < slots_num; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
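
/*
 * Editor's sketch (usage, an assumption about the caller's shape, not a
 * quote from iwl-agn): queue setup typically initializes the command
 * queue with TFD_CMD_SLOTS and the data queues with TFD_TX_CMD_SLOTS,
 * roughly:
 *
 *	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
 *		slots = (txq_id == priv->cmd_queue) ?
 *				TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
 *		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id],
 *					slots, txq_id);
 *	}
 */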
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			int slots_num, u32 txq_id)
{
	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * slots_num);

	txq->need_update = 0;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwlagn_tx_queue_init(priv, txq);
}
/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_WARN(priv, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

		IWL_ERR(priv, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv);
		if (!is_ct_kill) {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv, false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
		return -ENOSPC;
	}

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to zero */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
					    INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, priv->cmd_queue);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   copy_size, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
					   cmd->len[i], PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
			iwlagn_unmap_tfd(priv, out_meta,
					 &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags | CMD_MAPPED;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

 out:
	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
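
/*
 * Editor's sketch (usage, not driver code): a caller builds an
 * iwl_host_cmd and lets this function copy or map each chunk.  The names
 * scan and scan_len below are hypothetical:
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_SCAN_CMD,
 *		.flags = CMD_SYNC | CMD_WANT_SKB,
 *		.len = { scan_len, },
 *		.data = { scan, },
 *		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
 *	};
 *
 *	ret = iwl_enqueue_hcmd(priv, &cmd);	// index on success, < 0 on error
 */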
/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances the 'R' index, all entries between the old and new 'R'
 * index need to be reclaimed. As a result, some free space becomes
 * available. If there is enough free space (> low mark), wake the stack
 * that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv, false);
		}
	}
}
/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != priv->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, priv->cmd_queue, sequence,
		  priv->txq[priv->cmd_queue].q.read_ptr,
		  priv->txq[priv->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}

	/* Mark as unmapped */
	meta->flags = 0;

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
}
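
/*
 * Editor's sketch: SEQ_TO_QUEUE()/SEQ_TO_INDEX() undo the encoding that
 * iwl_enqueue_hcmd() builds with QUEUE_TO_SEQ()/INDEX_TO_SEQ().  From
 * memory of iwl-commands.h (an assumption, verify against that header),
 * the index lives in sequence bits 0-7 and the queue in bits 8-12:
 *
 *	#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
 *	#define SEQ_TO_INDEX(s)	((s) & 0xff)
 */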