rx.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which point to Receive Buffers to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is
 *   scheduled to replenish the iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled), if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
 *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index. If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 * ...
 *
 */
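/*
 * Editorial illustration (derived from the rules above, not part of the
 * driver): with a 256-entry queue, if READ = 10 then WRITE = 9 (READ - 1)
 * means the queue holds no good data (empty), while WRITE = 10 (WRITE =
 * READ) means every usable slot holds data (full). Keeping one slot
 * permanently unused this way is the classic trick that lets a circular
 * buffer distinguish empty from full using only the two indexes.
 */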
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure RX_QUEUE_SIZE is a power of 2 */
	BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
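/*
 * Editorial sketch (not part of the driver): the masking above relies on
 * RX_QUEUE_SIZE being a power of two, so "& (RX_QUEUE_SIZE - 1)" behaves
 * like an unsigned modulo even when (read - write - 1) goes negative.
 * Extracted as standalone C for illustration, assuming RX_QUEUE_SIZE = 256
 * as in this driver:
 *
 *	#include <assert.h>
 *	#define RX_QUEUE_SIZE 256
 *
 *	static int space(int read, int write)
 *	{
 *		return (read - write - 1) & (RX_QUEUE_SIZE - 1);
 *	}
 *
 *	int main(void)
 *	{
 *		assert(space(10, 4) == 5);   // simple case
 *		assert(space(4, 10) == 249;  // wrapped: (-7) & 255 == 249
 *		assert(space(0, 255) == 0);  // queue fully stocked
 *		return 0;
 *	}
 */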
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
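/*
 * Editorial note (a sketch, not part of the driver): shifting right by 8
 * drops the low byte, which is why iwl_pcie_rxq_alloc_rbs() below insists
 * that page DMA addresses are 256-byte aligned and fit within 36 bits --
 * after the shift, such an address fits losslessly in the low 28 bits of
 * a u32. For example (hypothetical address):
 *
 *	// 0x123456700 fits in 36 bits and is 256-byte aligned
 *	u64 dma_addr = 0x123456700ULL;
 *	u32 rbd = (u32)(dma_addr >> 8);  // 0x01234567, no information lost
 */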
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&rxq->lock, flags);

	if (rxq->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		rxq->write_actual = (rxq->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					       "Rx queue requesting wakeup,"
					       " GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			rxq->write_actual = (rxq->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   rxq->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			rxq->write_actual = (rxq->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   rxq->write_actual);
		}
	}
	rxq->need_update = 0;

exit_unlock:
	spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);

	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	}
}
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the
 * newly allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					       "order: %d\n",
					       trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s. "
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_irqsave(&rxq->lock, flags);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	lockdep_assert_held(&rxq->lock);

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		if (!rxq->pool[i].page)
			continue;
		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
		rxq->pool[i].page = NULL;
	}
}
/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except during initialization).
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_rxq_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
		container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	int i;

	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;

	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_rxq_free_rbs(trans);
	iwl_pcie_rx_init_rxb_lists(rxq);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&trans_pcie->rx_replenish);

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			     rxcb._offset,
			     get_cmd_string(trans_pcie, pkt->hdr.cmd),
			     pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			list_add_tail(&rxb->list, &rxq->rx_used);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate how many frames need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_dump_fh(trans, NULL);

	/* set the ERROR bit before we wake up the caller */
	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	local_bh_disable();
	iwl_nic_error(trans);
	local_bh_enable();
}
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	/* the interrupt was saved in inta; now we can reset trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
						  "Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			"Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}
	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt before the shared data changes reflect it;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */
		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}

/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}
/* Device is going up; inform it that it is using the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/* Device is going down; disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;
	irqreturn_t ret = IRQ_NONE;

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the irq thread will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	if (inta & (~inta_mask)) {
		IWL_DEBUG_ISR(trans,
			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
			      inta & (~inta_mask));
		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
		inta &= inta_mask;
	}

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask,
			      iwl_read32(trans, CSR_FH_INT_STATUS));

	trans_pcie->inta |= inta;
	/* the thread will service interrupts and re-enable them */
	if (likely(inta))
		return IRQ_WAKE_THREAD;

	ret = IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only re-enable if disabled by irq and no tasklet is scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return ret;
}
/* Interrupt handler using the ICT table. With this handler the driver stops
 * using the INTA register to discover the device's interrupts, since reading
 * that register is expensive. Instead, the device writes interrupts into the
 * ICT DRAM table and increments its index, then fires an interrupt to the
 * driver. The driver ORs all ICT table entries from the current index up to
 * the first entry with a 0 value; the result is the interrupt(s) we need to
 * service. The driver then sets the consumed entries back to 0 and updates
 * the index.
 */
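/*
 * Editorial sketch (not part of the driver): the OR-until-zero scan
 * described above, extracted as standalone C. Byte-swapping (le32_to_cpu)
 * and tracing are omitted for clarity; the table and index correspond to
 * ict_tbl and ict_index in iwl_pcie_isr_ict() below:
 *
 *	#define ICT_COUNT 1024
 *
 *	static unsigned int ict_collect(unsigned int *tbl, unsigned int *idx)
 *	{
 *		unsigned int val = 0;
 *
 *		while (tbl[*idx]) {
 *			val |= tbl[*idx];  // accumulate pending causes
 *			tbl[*idx] = 0;     // hand the slot back to the device
 *			*idx = (*idx + 1) % ICT_COUNT;
 *		}
 *		return val;
 *	}
 */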
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta;
	u32 val = 0;
	u32 read;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;
	/*
	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
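	/*
	 * Editorial note (worked example, not part of the driver): each
	 * 32-bit ICT entry packs a compressed view of CSR_INT -- the low
	 * byte of val carries CSR_INT bits 0-7, and the second byte carries
	 * bits 24-31. The expression above expands it back, e.g.:
	 *
	 *	// val = 0x00008001: low byte 0x01 -> inta bit 0,
	 *	// second byte 0x80, shifted left 16 -> inta bit 31
	 *	u32 val = 0x00008001;
	 *	u32 inta = (0xff & val) | ((0xff00 & val) << 16); // 0x80000001
	 */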
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
		      inta, trans_pcie->inta_mask, val);
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
			      iwl_read32(trans, CSR_INT_MASK));

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta)) {
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return IRQ_WAKE_THREAD;
	}

	ret = IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return ret;
}