iwl-trans.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>

#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO remove unneeded includes when the transport layer tx_free will be here */
#include "iwl-agn.h"
#include "iwl-core.h"
#include "iwl-shared.h"

static int iwl_trans_rx_alloc(struct iwl_priv *priv)
{
        struct iwl_rx_queue *rxq = &priv->rxq;
        struct device *dev = priv->bus->dev;

        memset(&priv->rxq, 0, sizeof(priv->rxq));

        spin_lock_init(&rxq->lock);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;

        /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
        rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                                     &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;
        memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

        /* Allocate the driver's pointer to receive buffer status */
        rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
                                          &rxq->rb_stts_dma, GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err_rb_stts;
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

        return 0;

err_rb_stts:
        dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;
err_bd:
        return -ENOMEM;
}
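
/*
 * (for documentation purposes -- an illustrative pairing only, mirroring how
 * iwl_rx_init() below drives the allocation; not additional driver logic)
 *
 *      if (!priv->rxq.bd) {
 *              err = iwl_trans_rx_alloc(priv);
 *              if (err)
 *                      return err;
 *      }
 *      ...
 *      iwl_trans_pcie_rx_free(priv);   releases bd and rb_stts again
 */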

static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
{
        struct iwl_rx_queue *rxq = &priv->rxq;
        int i;

        /* Fill the rx_used queue with _all_ of the Rx buffers */
        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                /* In the reset function, these buffers may have been allocated
                 * to an SKB, so we need to unmap and free potential storage */
                if (rxq->pool[i].page != NULL) {
                        dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
                                       PAGE_SIZE << hw_params(priv).rx_page_order,
                                       DMA_FROM_DEVICE);
                        __iwl_free_pages(priv, rxq->pool[i].page);
                        rxq->pool[i].page = NULL;
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
        }
}

static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
                                 struct iwl_rx_queue *rxq)
{
        u32 rb_size;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
        u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

        rb_timeout = RX_RB_TIMEOUT;

        if (iwlagn_mod_params.amsdu_size_8K)
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
        else
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

        /* Stop Rx DMA */
        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

        /* Reset driver's Rx queue write index */
        iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                           (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                           rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         * the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                           FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                           FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                           FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                           FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
                           rb_size |
                           (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                           (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
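
/*
 * (for documentation purposes)  The two base-address writes above drop the
 * low address bits, so the hardware effectively receives:
 *
 *      RBD ring base = rxq->bd_dma >> 8        (assumes 256-byte alignment)
 *      rb_stts base  = rxq->rb_stts_dma >> 4   (assumes 16-byte alignment)
 *
 * Both buffers come from dma_alloc_coherent(), which is expected to return
 * at least page-aligned memory, so the alignment assumption holds by
 * construction.
 */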

static int iwl_rx_init(struct iwl_priv *priv)
{
        struct iwl_rx_queue *rxq = &priv->rxq;
        int i, err;
        unsigned long flags;

        if (!rxq->bd) {
                err = iwl_trans_rx_alloc(priv);
                if (err)
                        return err;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        iwl_trans_rxq_free_rx_bufs(priv);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                rxq->queue[i] = NULL;

        /* Set us so that we have processed and used all buffers, but have
         * not restocked the Rx queue with fresh buffers */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        rxq->free_count = 0;
        spin_unlock_irqrestore(&rxq->lock, flags);

        iwlagn_rx_replenish(priv);

        iwl_trans_rx_hw_init(priv, rxq);

        spin_lock_irqsave(&priv->shrd->lock, flags);
        rxq->need_update = 1;
        iwl_rx_queue_update_write_ptr(priv, rxq);
        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        return 0;
}

static void iwl_trans_pcie_rx_free(struct iwl_priv *priv)
{
        struct iwl_rx_queue *rxq = &priv->rxq;
        unsigned long flags;

        /* if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
        if (!rxq->bd) {
                IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
                return;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        iwl_trans_rxq_free_rx_bufs(priv);
        spin_unlock_irqrestore(&rxq->lock, flags);

        dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;

        if (rxq->rb_stts)
                dma_free_coherent(priv->bus->dev,
                                  sizeof(struct iwl_rb_status),
                                  rxq->rb_stts, rxq->rb_stts_dma);
        else
                IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
        memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
        rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_priv *priv)
{
        /* stop Rx DMA */
        iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
                                   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
                                       struct iwl_dma_ptr *ptr, size_t size)
{
        if (WARN_ON(ptr->addr))
                return -EINVAL;

        ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
                                       &ptr->dma, GFP_KERNEL);
        if (!ptr->addr)
                return -ENOMEM;
        ptr->size = size;
        return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
                                       struct iwl_dma_ptr *ptr)
{
        if (unlikely(!ptr->addr))
                return;

        dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
        memset(ptr, 0, sizeof(*ptr));
}
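
/*
 * (for documentation purposes)  The two helpers above are used as a pair;
 * iwl_trans_tx_alloc()/iwl_trans_pcie_tx_free() below do, for example:
 *
 *      ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
 *      if (ret)
 *              goto error;
 *      ...
 *      iwlagn_free_dma_ptr(priv, &priv->kw);
 */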

static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                               int slots_num, u32 txq_id)
{
        size_t tfd_sz = hw_params(priv).tfd_size * TFD_QUEUE_SIZE_MAX;
        int i;

        if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
                return -EINVAL;

        txq->q.n_window = slots_num;

        txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num,
                            GFP_KERNEL);
        txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num,
                           GFP_KERNEL);

        if (!txq->meta || !txq->cmd)
                goto error;

        for (i = 0; i < slots_num; i++) {
                txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
                                      GFP_KERNEL);
                if (!txq->cmd[i])
                        goto error;
        }

        /* Alloc driver data array and TFD circular buffer */
        /* Driver private data, only for Tx (not command) queues,
         * not shared with device. */
        if (txq_id != priv->shrd->cmd_queue) {
                txq->txb = kzalloc(sizeof(txq->txb[0]) *
                                   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
                if (!txq->txb) {
                        IWL_ERR(priv, "kmalloc for auxiliary BD "
                                "structures failed\n");
                        goto error;
                }
        } else {
                txq->txb = NULL;
        }

        /* Circular buffer of transmit frame descriptors (TFDs),
         * shared with device */
        txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr,
                                       GFP_KERNEL);
        if (!txq->tfds) {
                IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
                goto error;
        }
        txq->q.id = txq_id;

        return 0;
error:
        kfree(txq->txb);
        txq->txb = NULL;
        /* since txq->cmd has been zeroed,
         * all non allocated cmd[i] will be NULL */
        if (txq->cmd)
                for (i = 0; i < slots_num; i++)
                        kfree(txq->cmd[i]);
        kfree(txq->meta);
        kfree(txq->cmd);
        txq->meta = NULL;
        txq->cmd = NULL;

        return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                              int slots_num, u32 txq_id)
{
        int ret;

        txq->need_update = 0;
        memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

        /*
         * For the default queues 0-3, set up the swq_id
         * already -- all others need to get one later
         * (if they need one at all).
         */
        if (txq_id < 4)
                iwl_set_swq_id(txq, txq_id, txq_id);

        /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
         * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

        /* Initialize queue's high/low-water marks, and head/tail indexes */
        ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
                             txq_id);
        if (ret)
                return ret;

        /*
         * Tell nic where to find circular buffer of Tx Frame Descriptors for
         * given Tx queue, and enable the DMA channel used for that queue.
         * Circular buffer (TFD queue in DRAM) physical base address */
        iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
                           txq->q.dma_addr >> 8);

        return 0;
}
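
/*
 * (for documentation purposes)  TFD_QUEUE_SIZE_MAX must stay a power of two
 * because the ring indices wrap with a mask rather than a modulo, roughly:
 *
 *      index = (index + 1) & (n_bd - 1);       what iwl_queue_inc_wrap relies on
 *
 * which is only correct for power-of-two n_bd -- hence the BUILD_BUG_ON()
 * in iwl_trans_txq_init() above.
 */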

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;

        if (!q->n_bd)
                return;

        while (q->write_ptr != q->read_ptr) {
                /* The read_ptr needs to be bounded by q->n_window */
                iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
        }
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct device *dev = priv->bus->dev;
        int i;

        if (WARN_ON(!txq))
                return;

        iwl_tx_queue_unmap(priv, txq_id);

        /* De-alloc array of command/tx buffers */
        for (i = 0; i < txq->q.n_window; i++)
                kfree(txq->cmd[i]);

        /* De-alloc circular buffer of TFDs */
        if (txq->q.n_bd) {
                dma_free_coherent(dev, hw_params(priv).tfd_size *
                                  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
                memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
        }

        /* De-alloc array of per-TFD driver data */
        kfree(txq->txb);
        txq->txb = NULL;

        /* deallocate arrays */
        kfree(txq->cmd);
        kfree(txq->meta);
        txq->cmd = NULL;
        txq->meta = NULL;

        /* 0-fill queue descriptor structure */
        memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_priv *priv)
{
        int txq_id;

        /* Tx queues */
        if (priv->txq) {
                for (txq_id = 0;
                     txq_id < hw_params(priv).max_txq_num; txq_id++)
                        iwl_tx_queue_free(priv, txq_id);
        }

        kfree(priv->txq);
        priv->txq = NULL;

        iwlagn_free_dma_ptr(priv, &priv->kw);

        iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_priv *priv)
{
        int ret;
        int txq_id, slots_num;

        /* It is not allowed to alloc twice, so warn when this happens.
         * We cannot rely on the previous allocation, so free and fail */
        if (WARN_ON(priv->txq)) {
                ret = -EINVAL;
                goto error;
        }

        ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
                                   hw_params(priv).scd_bc_tbls_size);
        if (ret) {
                IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
                goto error;
        }

        /* Alloc keep-warm buffer */
        ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
        if (ret) {
                IWL_ERR(priv, "Keep Warm allocation failed\n");
                goto error;
        }

        priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
                            priv->cfg->base_params->num_of_queues, GFP_KERNEL);
        if (!priv->txq) {
                IWL_ERR(priv, "Not enough memory for txq\n");
                ret = -ENOMEM;
                goto error;
        }

        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
                slots_num = (txq_id == priv->shrd->cmd_queue) ?
                            TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
                                          txq_id);
                if (ret) {
                        IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
                        goto error;
                }
        }

        return 0;

error:
        iwl_trans_tx_free(trans(priv));

        return ret;
}
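
/*
 * (for documentation purposes)  The single "goto error" label above relies on
 * the free helpers tolerating partially initialised state:
 * iwl_trans_pcie_tx_free() skips a NULL priv->txq and iwlagn_free_dma_ptr()
 * returns early when ptr->addr is NULL, so the unwind is safe no matter how
 * far the allocation got.
 */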

static int iwl_tx_init(struct iwl_priv *priv)
{
        int ret;
        int txq_id, slots_num;
        unsigned long flags;
        bool alloc = false;

        if (!priv->txq) {
                ret = iwl_trans_tx_alloc(priv);
                if (ret)
                        goto error;
                alloc = true;
        }

        spin_lock_irqsave(&priv->shrd->lock, flags);

        /* Turn off all Tx DMA fifos */
        iwl_write_prph(priv, SCD_TXFACT, 0);

        /* Tell NIC where to find the "keep warm" buffer */
        iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++) {
                slots_num = (txq_id == priv->shrd->cmd_queue) ?
                            TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
                                         txq_id);
                if (ret) {
                        IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
                        goto error;
                }
        }

        return 0;
error:
        /* Upon error, free only if we allocated something */
        if (alloc)
                iwl_trans_tx_free(trans(priv));
        return ret;
}

static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
        /*
         * (for documentation purposes)
         * to set power to V_AUX, do:
         *
         *      if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
         *              iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
         *                                     APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
         *                                     ~APMG_PS_CTRL_MSK_PWR_SRC);
         */

        iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
                               APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
                               ~APMG_PS_CTRL_MSK_PWR_SRC);
}

static int iwl_nic_init(struct iwl_priv *priv)
{
        unsigned long flags;

        /* nic_init */
        spin_lock_irqsave(&priv->shrd->lock, flags);
        iwl_apm_init(priv);

        /* Set interrupt coalescing calibration timer to default (512 usecs) */
        iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        iwl_set_pwr_vmain(priv);

        priv->cfg->lib->nic_config(priv);

        /* Allocate the RX queue, or reset if it is already allocated */
        iwl_rx_init(priv);

        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_tx_init(priv))
                return -ENOMEM;

        if (priv->cfg->base_params->shadow_reg_enable) {
                /* enable shadow regs in HW */
                iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
                            0x800FFFFF);
        }

        set_bit(STATUS_INIT, &priv->shrd->status);

        return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_priv *priv)
{
        int ret;

        iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

        /* See if we got it */
        ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           HW_READY_TIMEOUT);

        IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
        return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_priv *priv)
{
        int ret;

        IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");

        ret = iwl_set_hw_ready(priv);
        if (ret >= 0)
                return 0;

        /* If HW is not ready, prepare the conditions to check again */
        iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_PREPARE);

        ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
                           ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

        if (ret < 0)
                return ret;

        /* HW should be ready by now, check again. */
        ret = iwl_set_hw_ready(priv);
        if (ret >= 0)
                return 0;
        return ret;
}
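
/*
 * (for documentation purposes)  Callers interpret iwl_set_hw_ready() like
 * iwl_poll_bit(): >= 0 means the NIC_READY bit was seen in time, < 0 means
 * timeout, while iwl_trans_pcie_prepare_card_hw() folds that back into a
 * standard 0/-ERROR code.  iwl_trans_pcie_start_device() below uses it as:
 *
 *      if (iwl_trans_pcie_prepare_card_hw(priv)) {
 *              IWL_WARN(priv, "Exit HW not ready\n");
 *              return -EIO;
 *      }
 */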

static int iwl_trans_pcie_start_device(struct iwl_priv *priv)
{
        int ret;

        priv->ucode_owner = IWL_OWNERSHIP_DRIVER;

        if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
            iwl_trans_pcie_prepare_card_hw(priv)) {
                IWL_WARN(priv, "Exit HW not ready\n");
                return -EIO;
        }

        /* If platform's RF_KILL switch is NOT set to KILL */
        if (iwl_read32(priv, CSR_GP_CNTRL) &
            CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
                clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
        else
                set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);

        if (iwl_is_rfkill(priv)) {
                wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
                iwl_enable_interrupts(priv);
                return -ERFKILL;
        }

        iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

        ret = iwl_nic_init(priv);
        if (ret) {
                IWL_ERR(priv, "Unable to init nic\n");
                return ret;
        }

        /* make sure rfkill handshake bits are cleared */
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
                    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
        iwl_enable_interrupts(priv);

        /* really make sure rfkill handshake bits are cleared */
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

        return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the tx fifos mask
 * must be called under priv->shrd->lock and mac access
 */
static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
        iwl_write_prph(priv, SCD_TXFACT, mask);
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
        s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
        { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
        { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
        { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
        { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
        { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
        { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
        { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
        { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
        { IWL_TX_FIFO_BE_IPAN, 2, },
        { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
        { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};
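
/*
 * (for documentation purposes)  Entry i of the tables above describes static
 * Tx queue i: .fifo is the HW FIFO the queue is bound to (IWL_TX_FIFO_UNUSED
 * leaves it unbound) and .ac is the mac80211 access category used for its
 * software queue id (IWL_AC_UNSET means none, e.g. for the command FIFO).
 * iwl_trans_pcie_tx_start() below walks the first IWLAGN_FIRST_AMPDU_QUEUE
 * entries and programs the scheduler from them.
 */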

static void iwl_trans_pcie_tx_start(struct iwl_priv *priv)
{
        const struct queue_to_fifo_ac *queue_to_fifo;
        struct iwl_rxon_context *ctx;
        u32 a;
        unsigned long flags;
        int i, chan;
        u32 reg_val;

        spin_lock_irqsave(&priv->shrd->lock, flags);

        priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
        a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
        /* reset context data memory */
        for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
             a += 4)
                iwl_write_targ_mem(priv, a, 0);
        /* reset tx status memory */
        for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
             a += 4)
                iwl_write_targ_mem(priv, a, 0);
        for (; a < priv->scd_base_addr +
               SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(priv).max_txq_num);
             a += 4)
                iwl_write_targ_mem(priv, a, 0);

        iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
                       priv->scd_bc_tbls.dma >> 10);

        /* Enable DMA channel */
        for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
                iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
                                   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                                   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

        /* Update FH chicken bits */
        reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
        iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
                           reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
                       SCD_QUEUECHAIN_SEL_ALL(priv));
        iwl_write_prph(priv, SCD_AGGR_SEL, 0);

        /* initiate the queues */
        for (i = 0; i < hw_params(priv).max_txq_num; i++) {
                iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
                iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
                iwl_write_targ_mem(priv, priv->scd_base_addr +
                                   SCD_CONTEXT_QUEUE_OFFSET(i), 0);
                iwl_write_targ_mem(priv, priv->scd_base_addr +
                                   SCD_CONTEXT_QUEUE_OFFSET(i) +
                                   sizeof(u32),
                                   ((SCD_WIN_SIZE <<
                                     SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
                                    SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
                                   ((SCD_FRAME_LIMIT <<
                                     SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
                                    SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
        }

        iwl_write_prph(priv, SCD_INTERRUPT_MASK,
                       IWL_MASK(0, hw_params(priv).max_txq_num));

        /* Activate all Tx DMA/FIFO channels */
        iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));

        /* map queues to FIFOs */
        if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
                queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
        else
                queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

        iwl_trans_set_wr_ptrs(priv, priv->shrd->cmd_queue, 0);

        /* make sure all queues are not stopped */
        memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
        for (i = 0; i < 4; i++)
                atomic_set(&priv->queue_stop_count[i], 0);
        for_each_context(priv, ctx)
                ctx->last_tx_rejected = false;

        /* reset to 0 to enable all the queues first */
        priv->txq_ctx_active_msk = 0;

        BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) !=
                     IWLAGN_FIRST_AMPDU_QUEUE);
        BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) !=
                     IWLAGN_FIRST_AMPDU_QUEUE);

        for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
                int fifo = queue_to_fifo[i].fifo;
                int ac = queue_to_fifo[i].ac;

                iwl_txq_ctx_activate(priv, i);

                if (fifo == IWL_TX_FIFO_UNUSED)
                        continue;

                if (ac != IWL_AC_UNSET)
                        iwl_set_swq_id(&priv->txq[i], ac, i);
                iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
        }

        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        /* Enable L1-Active */
        iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
                            APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
        int ch, txq_id;
        unsigned long flags;

        /* Turn off all Tx DMA fifos */
        spin_lock_irqsave(&priv->shrd->lock, flags);

        iwl_trans_txq_set_sched(priv, 0);

        /* Stop each Tx DMA channel, and wait for it to be idle */
        for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
                iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
                if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
                                        FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
                                        1000))
                        IWL_ERR(priv, "Failing on timeout while stopping"
                                " DMA channel %d [0x%08x]", ch,
                                iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
        }
        spin_unlock_irqrestore(&priv->shrd->lock, flags);

        if (!priv->txq) {
                IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
                return 0;
        }

        /* Unmap DMA from host system and free skb's */
        for (txq_id = 0; txq_id < hw_params(priv).max_txq_num; txq_id++)
                iwl_tx_queue_unmap(priv, txq_id);

        return 0;
}

static void iwl_trans_pcie_stop_device(struct iwl_priv *priv)
{
        unsigned long flags;

        /* stop and reset the on-board processor */
        iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

        /* tell the device to stop sending interrupts */
        spin_lock_irqsave(&priv->shrd->lock, flags);
        iwl_disable_interrupts(priv);
        spin_unlock_irqrestore(&priv->shrd->lock, flags);
        iwl_trans_sync_irq(trans(priv));

        /* device going down, Stop using ICT table */
        iwl_disable_ict(priv);

        /*
         * If a HW restart happens during firmware loading,
         * then the firmware loading might call this function
         * and later it might be called again due to the
         * restart. So don't process again if the device is
         * already dead.
         */
        if (test_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status)) {
                iwl_trans_tx_stop(priv);
                iwl_trans_rx_stop(priv);

                /* Power-down device's busmaster DMA clocks */
                iwl_write_prph(priv, APMG_CLK_DIS_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(5);
        }

        /* Make sure (redundant) we've released our request to stay awake */
        iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwl_apm_stop(priv);
}

static struct iwl_tx_cmd *iwl_trans_pcie_get_tx_cmd(struct iwl_priv *priv,
                                                    int txq_id)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *dev_cmd;

        if (unlikely(iwl_queue_space(q) < q->high_mark))
                return NULL;

        /*
         * Set up the Tx-command (not MAC!) header.
         * Store the chosen Tx queue and TFD index within the sequence field;
         * after Tx, uCode's Tx response will return this value so driver can
         * locate the frame within the tx queue and do post-tx processing.
         */
        dev_cmd = txq->cmd[q->write_ptr];
        memset(dev_cmd, 0, sizeof(*dev_cmd));
        dev_cmd->hdr.cmd = REPLY_TX;
        dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                                                  INDEX_TO_SEQ(q->write_ptr)));
        return &dev_cmd->cmd.tx;
}
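
/*
 * (for documentation purposes -- the real caller lives outside this file, so
 * this flow is an illustrative assumption, not a quote of it)
 *
 *      tx_cmd = iwl_trans_pcie_get_tx_cmd(priv, txq_id);
 *      if (!tx_cmd)
 *              goto drop;      queue is above its high-water mark
 *      ... fill in rate, security and length fields of *tx_cmd ...
 *      iwl_trans_pcie_tx(priv, skb, tx_cmd, txq_id, fc, is_agg, ctx);
 */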

static int iwl_trans_pcie_tx(struct iwl_priv *priv, struct sk_buff *skb,
                struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
                struct iwl_rxon_context *ctx)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
        struct iwl_cmd_meta *out_meta;
        dma_addr_t phys_addr = 0;
        dma_addr_t txcmd_phys;
        dma_addr_t scratch_phys;
        u16 len, firstlen, secondlen;
        u8 wait_write_ptr = 0;
        u8 hdr_len = ieee80211_hdrlen(fc);

        /* Set up driver data for this TFD */
        memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
        txq->txb[q->write_ptr].skb = skb;
        txq->txb[q->write_ptr].ctx = ctx;

        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->meta[q->write_ptr];

        /*
         * Use the first empty entry in this queue's command buffer array
         * to contain the Tx command and MAC header concatenated together
         * (payload data will be in another buffer).
         * Size of this varies, due to varying MAC header length.
         * If end is not dword aligned, we'll have 2 extra bytes at the end
         * of the MAC header (device reads on dword boundaries).
         * We'll tell device about this padding later.
         */
        len = sizeof(struct iwl_tx_cmd) +
              sizeof(struct iwl_cmd_header) + hdr_len;
        firstlen = (len + 3) & ~3;

        /* Tell NIC about any 2-byte padding after MAC header */
        if (firstlen != len)
                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
        txcmd_phys = dma_map_single(priv->bus->dev,
                                    &dev_cmd->hdr, firstlen,
                                    DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
                return -1;
        dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
        dma_unmap_len_set(out_meta, len, firstlen);

        if (!ieee80211_has_morefrags(fc)) {
                txq->need_update = 1;
        } else {
                wait_write_ptr = 1;
                txq->need_update = 0;
        }

        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
        secondlen = skb->len - hdr_len;
        if (secondlen > 0) {
                phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
                                           secondlen, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
                        dma_unmap_single(priv->bus->dev,
                                         dma_unmap_addr(out_meta, mapping),
                                         dma_unmap_len(out_meta, len),
                                         DMA_BIDIRECTIONAL);
                        return -1;
                }
        }

        /* Attach buffers to TFD */
        iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
        if (secondlen > 0)
                iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
                                             secondlen, 0);

        scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
                       offsetof(struct iwl_tx_cmd, scratch);

        /* take back ownership of DMA buffer to enable update */
        dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
                                DMA_BIDIRECTIONAL);
        tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
        tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

        IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
                     le16_to_cpu(dev_cmd->hdr.sequence));
        IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

        /* Set up entry for this TFD in Tx byte-count array */
        if (ampdu)
                iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
                                                  le16_to_cpu(tx_cmd->len));

        dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
                                   DMA_BIDIRECTIONAL);

        trace_iwlwifi_dev_tx(priv,
                             &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
                             sizeof(struct iwl_tfd),
                             &dev_cmd->hdr, firstlen,
                             skb->data + hdr_len, secondlen);

        /* Tell device the write index *just past* this latest filled TFD */
        q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_txq_update_write_ptr(priv, txq);

        /*
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually,
         * regardless of the value of ret. "ret" only indicates
         * whether or not we should update the write pointer.
         */
        if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
                if (wait_write_ptr) {
                        txq->need_update = 1;
                        iwl_txq_update_write_ptr(priv, txq);
                } else {
                        iwl_stop_queue(priv, txq);
                }
        }
        return 0;
}
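
/*
 * (for documentation purposes)  The firstlen computation in
 * iwl_trans_pcie_tx() above rounds the Tx command + MAC header up to a dword
 * boundary, e.g. with a 26-byte 802.11 QoS data header:
 *
 *      len      = sizeof(struct iwl_tx_cmd)
 *                 + sizeof(struct iwl_cmd_header) + 26;
 *      firstlen = (len + 3) & ~3;      len rounded up to a multiple of 4
 *
 * When firstlen != len, the device is told about the extra padding bytes via
 * TX_CMD_FLG_MH_PAD_MSK, as done right after that computation.
 */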

static void iwl_trans_pcie_kick_nic(struct iwl_priv *priv)
{
        /* Remove all resets to allow NIC to operate */
        iwl_write32(priv, CSR_RESET, 0);
}

static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
{
        struct iwl_priv *priv = priv(trans);
        int err;

        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
                     iwl_irq_tasklet, (unsigned long)priv);

        iwl_alloc_isr_ict(priv);

        err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
                          DRV_NAME, priv);
        if (err) {
                IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
                iwl_free_isr_ict(priv);
                return err;
        }

        INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
        return 0;
}

static void iwl_trans_pcie_sync_irq(struct iwl_priv *priv)
{
        /* wait to make sure we flush pending tasklet */
        synchronize_irq(priv->bus->irq);
        tasklet_kill(&priv->irq_tasklet);
}

static void iwl_trans_pcie_free(struct iwl_priv *priv)
{
        free_irq(priv->bus->irq, priv);
        iwl_free_isr_ict(priv);
        kfree(trans(priv));
        trans(priv) = NULL;
}

const struct iwl_trans_ops trans_ops_pcie;

static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
{
        struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
                                              sizeof(struct iwl_trans_pcie),
                                              GFP_KERNEL);

        if (iwl_trans) {
                iwl_trans->ops = &trans_ops_pcie;
                iwl_trans->shrd = shrd;
        }

        return iwl_trans;
}

const struct iwl_trans_ops trans_ops_pcie = {
        .alloc = iwl_trans_pcie_alloc,
        .request_irq = iwl_trans_pcie_request_irq,
        .start_device = iwl_trans_pcie_start_device,
        .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
        .stop_device = iwl_trans_pcie_stop_device,

        .tx_start = iwl_trans_pcie_tx_start,

        .rx_free = iwl_trans_pcie_rx_free,
        .tx_free = iwl_trans_pcie_tx_free,

        .send_cmd = iwl_trans_pcie_send_cmd,
        .send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,

        .get_tx_cmd = iwl_trans_pcie_get_tx_cmd,
        .tx = iwl_trans_pcie_tx,

        .txq_agg_disable = iwl_trans_pcie_txq_agg_disable,
        .txq_agg_setup = iwl_trans_pcie_txq_agg_setup,

        .kick_nic = iwl_trans_pcie_kick_nic,

        .sync_irq = iwl_trans_pcie_sync_irq,
        .free = iwl_trans_pcie_free,
};
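
/*
 * (for documentation purposes -- the core code that drives this ops table
 * lives outside this file, so the sequence below is an illustrative
 * assumption about intended use, not a quote of it)
 *
 *      struct iwl_trans *trans = trans_ops_pcie.alloc(shrd);
 *      if (!trans)
 *              return -ENOMEM;
 *      trans->ops->request_irq(trans);
 *      trans->ops->start_device(priv(trans));
 *      ...
 *      trans->ops->stop_device(priv(trans));
 *      trans->ops->free(priv(trans));
 */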