iwl-trans.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include "iwl-dev.h"
#include "iwl-trans.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"
/* TODO: remove unneeded includes when the transport layer tx_free is here */
#include "iwl-agn.h"

static int iwl_trans_rx_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct device *dev = priv->bus->dev;

	memset(&priv->rxq, 0, sizeof(priv->rxq));

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
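
/*
 * Illustrative note (not driver code): each of the RX_QUEUE_SIZE RBD
 * slots allocated above is a __le32 holding the DMA address of a
 * receive buffer shifted right by 8 bits, so the device only ever sees
 * 256-byte-aligned pointers.  A hypothetical restock step, assuming a
 * pool entry 'rxb' whose page is already DMA-mapped, would look
 * roughly like:
 *
 *	rxq->bd[rxq->write] = cpu_to_le32((u32)(rxb->page_dma >> 8));
 *	rxq->write = (rxq->write + 1) & (RX_QUEUE_SIZE - 1);
 *
 * The real restock path lives in iwlagn_rx_replenish() and the queue
 * restock code elsewhere in the driver.
 */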
static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been
		 * allocated to an SKB, so we need to unmap and free
		 * potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				DMA_FROM_DEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	rb_timeout = RX_RB_TIMEOUT;

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
	 * the credit mechanism of the 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4k or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
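
/*
 * Illustrative note (not driver code): the single write to
 * FH_MEM_RCSR_CHNL0_CONFIG_REG above packs several fields into one
 * 32-bit value: the channel-enable bit, the "ignore RXF empty" HW-bug
 * workaround, the interrupt destination, the single-frame mask, the RB
 * size (4k or 8k), the RB interrupt threshold, and log2 of the number
 * of RBDs.  Conceptually:
 *
 *	config = ENABLE | IGNORE_RXF_EMPTY | IRQ_TO_HOST | SINGLE_FRAME
 *		 | rb_size
 *		 | (rb_timeout << IRQ_RBTH_POS)
 *		 | (rfdnlog << RBDCB_SIZE_POS);
 */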
static int iwl_rx_init(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(priv);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(priv);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us up so that we have processed and used all buffers, but
	 * have not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(priv);

	iwl_trans_rx_hw_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
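
/*
 * Illustrative note (not driver code): the ring indices set above
 * follow the usual producer/consumer convention -- 'write' is the slot
 * where the driver will place the next fresh buffer, 'read' is the
 * next slot it expects the device to have filled, and read == write
 * == 0 with free_count == 0 means "everything consumed, nothing
 * restocked yet".  That is why iwlagn_rx_replenish() is called right
 * afterwards: it hands fresh pages back to the device before RX DMA
 * is re-enabled.
 */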
static void iwl_trans_rx_free(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(priv, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(priv);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(priv->bus->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

static int iwl_trans_rx_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
				       struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(priv->bus->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
				       struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
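
/*
 * Illustrative usage (hypothetical, not driver code): the pair above
 * bundles a coherent DMA allocation with its bus address and size, so
 * callers such as iwl_trans_tx_alloc() can simply do:
 *
 *	struct iwl_dma_ptr kw = {};
 *
 *	if (iwlagn_alloc_dma_ptr(priv, &kw, IWL_KW_SIZE))
 *		return -ENOMEM;
 *	...
 *	iwlagn_free_dma_ptr(priv, &kw);
 *
 * The !ptr->addr check and the memset() in the free path make the free
 * safe to call even when the allocation failed, and make a double
 * free harmless.
 */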
static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			       int slots_num, u32 txq_id)
{
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kzalloc(sizeof(txq->meta[0]) * slots_num, GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(txq->cmd[0]) * slots_num, GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	for (i = 0; i < slots_num; i++) {
		txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
				      GFP_KERNEL);
		if (!txq->cmd[i])
			goto error;
	}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != priv->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				"structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;

error:
	kfree(txq->txb);
	txq->txb = NULL;
	/* since txq->cmd has been zeroed,
	 * all non allocated cmd[i] will be NULL */
	if (txq->cmd)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			     txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	return 0;
}
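
/*
 * Illustrative note (not driver code): the BUILD_BUG_ON above relies
 * on the classic power-of-two test -- x & (x - 1) is zero only when x
 * has a single bit set.  With a power-of-two ring size, the wrapping
 * index helpers used throughout this file can be a mask instead of a
 * division.  A minimal sketch of such a helper (the real
 * iwl_queue_inc_wrap is defined elsewhere in the driver and handles
 * more cases) would be:
 *
 *	static inline int inc_wrap_example(int index, int n_bd)
 *	{
 *		return (index + 1) & (n_bd - 1);  (n_bd is a power of two)
 *	}
 */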
/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;

	if (!q->n_bd)
		return;

	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bounded by q->n_window */
		iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr));
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct device *dev = priv->bus->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(priv, txq_id);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < txq->q.n_window; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_tx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0;
		     txq_id < priv->hw_params.max_txq_num; txq_id++)
			iwl_tx_queue_free(priv, txq_id);
	}

	kfree(priv->txq);
	priv->txq = NULL;

	iwlagn_free_dma_ptr(priv, &priv->kw);

	iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(priv->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				   priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error;
	}

	priv->txq = kzalloc(sizeof(struct iwl_tx_queue) *
			    priv->cfg->base_params->num_of_queues,
			    GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(priv, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num,
					  txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	trans_tx_free(&priv->trans);

	return ret;
}

static int iwl_tx_init(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;

	if (!priv->txq) {
		ret = iwl_trans_tx_alloc(priv);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(priv, SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num,
					 txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		trans_tx_free(&priv->trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

static int iwl_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_apm_init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_set_pwr_vmain(priv);

	priv->cfg->lib->nic_config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(priv);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(priv))
		return -ENOMEM;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
	}

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_priv *priv)
{
	int ret;

	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_prepare_card_hw(struct iwl_priv *priv)
{
	int ret;

	IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;
	return ret;
}
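
/*
 * Illustrative note (not driver code): the handshake above is a small
 * two-phase state machine.  The driver first sets NIC_READY and polls
 * briefly for the device to acknowledge it (iwl_set_hw_ready); if that
 * fails, it sets the PREPARE bit, waits up to 150 ms for the
 * PREPARE_DONE bit to clear, and then retries NIC_READY once more.
 * In pseudocode:
 *
 *	if (set_hw_ready() >= 0)
 *		return 0;
 *	set PREPARE; poll until PREPARE_DONE clears (<= 150000 us);
 *	return set_hw_ready() >= 0 ? 0 : error;
 */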
static int iwl_trans_start_device(struct iwl_priv *priv)
{
	int ret;

	priv->ucode_owner = IWL_OWNERSHIP_DRIVER;

	if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_trans_prepare_card_hw(priv)) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	if (iwl_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
		iwl_enable_interrupts(priv);
		return -ERFKILL;
	}

	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask.
 * Must be called under priv->lock and with MAC access held.
 */
static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, SCD_TXFACT, mask);
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, /* = IEEE80211_AC_BE */ },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
};
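
/*
 * Illustrative note (not driver code): each table above is indexed by
 * Tx queue number -- entry i names the hardware FIFO that queue i
 * drains into and the mac80211 access category bound to it, with
 * IWL_AC_UNSET for the command queue and unused slots.  The setup loop
 * in iwl_trans_tx_start() below walks all 10 entries: it activates
 * each queue, skips entries whose FIFO is IWL_TX_FIFO_UNUSED, and
 * binds a software queue id only where an AC is set.
 */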
static void iwl_trans_tx_start(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* reset tx status memory */
	for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num);
	       a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, SCD_DRAM_BASE_ADDR,
		       priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, SCD_QUEUECHAIN_SEL,
		       SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, SCD_INTERRUPT_MASK,
		       IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(priv, priv->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queues first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

	for (i = 0; i < 10; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	iwl_trans_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!priv->txq) {
		IWL_WARN(priv, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		iwl_tx_queue_unmap(priv, txq_id);

	return 0;
}

static void iwl_trans_stop_device(struct iwl_priv *priv)
{
	unsigned long flags;

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	trans_sync_irq(&priv->trans);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(priv);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
		iwl_trans_tx_stop(priv);
		iwl_trans_rx_stop(priv);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(priv, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(priv, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_apm_stop(priv);
}

static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv,
					       int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
		return NULL;

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	dev_cmd = txq->cmd[q->write_ptr];
	memset(dev_cmd, 0, sizeof(*dev_cmd));
	dev_cmd->hdr.cmd = REPLY_TX;
	dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));
	return &dev_cmd->cmd.tx;
}
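
/*
 * Illustrative note (not driver code): the sequence field above packs
 * the Tx queue id and the TFD index into one 16-bit value so the
 * uCode's Tx response can be matched back to the frame's TFD.
 * Assuming the layout implied by the QUEUE_TO_SEQ/INDEX_TO_SEQ macros
 * (queue in the upper byte, index in the lower), unpacking a response
 * would look roughly like:
 *
 *	u16 seq = le16_to_cpu(hdr->sequence);
 *	int txq_id = (seq >> 8) & 0x1f;  (assumption: 5-bit queue field)
 *	int index = seq & 0xff;
 *
 * The authoritative definitions live with those macros in the driver
 * headers.
 */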
static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
		struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu,
		struct iwl_rxon_context *ctx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr];
	struct iwl_cmd_meta *out_meta;

	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u8 wait_write_ptr = 0;
	u8 hdr_len = ieee80211_hdrlen(fc);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->meta[q->write_ptr];

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(priv->bus->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys)))
		return -1;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
			dma_unmap_single(priv->bus->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			return -1;
		}
	}

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
				offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(dev_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (ampdu)
		iwl_trans_txq_update_byte_cnt_tbl(priv, txq,
						  le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */
	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
		} else {
			iwl_stop_queue(priv, txq);
		}
	}
	return 0;
}
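
/*
 * Illustrative note (not driver code): firstlen = (len + 3) & ~3 above
 * rounds the combined Tx command + MAC header length up to the next
 * dword boundary.  Worked example, with a hypothetical combined
 * command/header size of 24 bytes and a 26-byte MAC header:
 *
 *	len = 24 + 26 = 50
 *	firstlen = (50 + 3) & ~3 = 52	(2 pad bytes)
 *
 * Because firstlen != len, TX_CMD_FLG_MH_PAD_MSK is set so the device
 * knows to skip the padding when it reads on dword boundaries.
 */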
static void iwl_trans_kick_nic(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}

static void iwl_trans_sync_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(priv->bus->irq);
	tasklet_kill(&priv->irq_tasklet);
}

static void iwl_trans_free(struct iwl_priv *priv)
{
	free_irq(priv->bus->irq, priv);
	iwl_free_isr_ict(priv);
}

static const struct iwl_trans_ops trans_ops = {
	.start_device = iwl_trans_start_device,
	.prepare_card_hw = iwl_trans_prepare_card_hw,
	.stop_device = iwl_trans_stop_device,

	.tx_start = iwl_trans_tx_start,

	.rx_free = iwl_trans_rx_free,
	.tx_free = iwl_trans_tx_free,

	.send_cmd = iwl_send_cmd,
	.send_cmd_pdu = iwl_send_cmd_pdu,

	.get_tx_cmd = iwl_trans_get_tx_cmd,
	.tx = iwl_trans_tx,

	.txq_agg_disable = iwl_trans_txq_agg_disable,
	.txq_agg_setup = iwl_trans_txq_agg_setup,

	.kick_nic = iwl_trans_kick_nic,

	.sync_irq = iwl_trans_sync_irq,
	.free = iwl_trans_free,
};
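
/*
 * Illustrative note (not driver code): trans_ops is the vtable behind
 * the trans_*() helpers used earlier in this file, e.g. trans_tx_free()
 * and trans_sync_irq().  Those wrappers are declared in iwl-trans.h; a
 * minimal sketch of what such a wrapper might look like (hypothetical,
 * the real definition may differ) is:
 *
 *	static inline void trans_tx_free(struct iwl_trans *trans)
 *	{
 *		trans->ops->tx_free(trans->priv);
 *	}
 *
 * Routing every hardware-facing entry point through one ops structure
 * is what keeps the upper layers bus-agnostic.
 */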
int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv)
{
	int err;

	priv->trans.ops = &trans_ops;
	priv->trans.priv = priv;

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)priv);

	iwl_alloc_isr_ict(priv);

	err = request_irq(priv->bus->irq, iwl_isr_ict, IRQF_SHARED,
			  DRV_NAME, priv);
	if (err) {
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq);
		iwl_free_isr_ict(priv);
		return err;
	}

	INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);

	return 0;
}