/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled), if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index. If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *                            ...
 *
 */
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *q)
{
	int s = q->read - q->write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
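
/*
 * Worked example for the arithmetic above (illustrative only, not part
 * of the original driver): with RX_QUEUE_SIZE = 256, read = 10 and
 * write = 250 give s = 10 - 250 = -240, wrapped to -240 + 256 = 16,
 * and finally 16 - 2 = 14 free slots. The "- 2" guard keeps 'write'
 * from ever catching up with 'read', so a completely full queue can
 * still be distinguished from an empty one.
 */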
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
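
/*
 * Note (added for clarity): shifting the DMA address right by 8 hands
 * the device bits [35:8] of the address in one 32-bit word. This is
 * only lossless because every receive buffer is 256-byte aligned and
 * fits below the 36-bit boundary -- both properties are asserted with
 * BUG_ON() in iwl_pcie_rxq_alloc_rbs() below.
 */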
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
	} else {
		struct iwl_trans_pcie *trans_pcie =
			IWL_TRANS_GET_PCIE_TRANS(trans);

		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
					   q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	/*
	 * If the device isn't enabled - there is no need to try to add
	 * buffers... This can happen when we stop the device and still have
	 * an interrupt pending. We stop the APM before we sync the
	 * interrupts / tasklets because we have to (see comment there). On
	 * the other hand, since the APM is stopped, we cannot access the HW
	 * (in particular not prph). So don't try to restock if the APM has
	 * already been stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
		return;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		schedule_work(&trans_pcie->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	}
}
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the
 * newly allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (trans_pcie->rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
					       "order: %d\n",
					       trans_pcie->rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(trans, "Failed to alloc_pages with %s. "
					 "Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?
					 "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
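
/*
 * A note on the locking pattern above (explanatory, inferred from the
 * code as written): rxq->lock is deliberately dropped around
 * alloc_pages(), which may sleep when called with GFP_KERNEL. Because
 * another context can drain rxq->rx_used in the meantime, the list is
 * re-checked under the lock once the page has been allocated, and the
 * page is freed again if no used RBD is left to attach it to.
 */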
static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << trans_pcie->rx_page_order,
				       DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     trans_pcie->rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}
/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except during initialization).
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_rxq_restock(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
	iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

	iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
	struct iwl_trans_pcie *trans_pcie =
	    container_of(data, struct iwl_trans_pcie, rx_replenish);

	iwl_pcie_rx_replenish(trans_pcie->trans);
}
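
/*
 * The replenish entry points above differ only in allocation context
 * (a summary, not new behavior): iwl_pcie_rx_replenish() runs in
 * process context -- from the rx_replenish work item or directly from
 * iwl_pcie_rx_init() -- and may use GFP_KERNEL, while
 * iwl_pcie_rx_replenish_now() is called from the interrupt path in
 * iwl_pcie_rx_handle() and therefore must use GFP_ATOMIC.
 */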
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct device *dev = trans->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
					   &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	if (trans_pcie->rx_buf_size_8k)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	INIT_WORK(&trans_pcie->rx_replenish,
		  iwl_pcie_rx_replenish_work);

	iwl_pcie_rxq_free_rbs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwl_pcie_rx_replenish(trans);

	iwl_pcie_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	rxq->need_update = 1;
	iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	return 0;
}
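
/*
 * Summary of the ordering above (explanatory only): memory is
 * allocated first, the RBD pool is reset and replenished with freshly
 * mapped pages, and only then is the hardware pointed at the
 * descriptors by iwl_pcie_rx_hw_init(). The write pointer is advanced
 * last, so the device only sees buffers that were actually restocked.
 */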
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_pcie_rxq_free_rbs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
	unsigned long flags;
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		struct iwl_device_cmd *cmd;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, err, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
			break;

		IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
			rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
			pkt->hdr.cmd);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim) {
			struct iwl_pcie_txq_entry *ent;

			ent = &txq->entries[cmd_index];
			cmd = ent->copy_cmd;
			WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
		} else {
			cmd = NULL;
		}

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			/* The original command isn't needed any more */
			kfree(txq->entries[cmd_index].copy_cmd);
			txq->entries[cmd_index].copy_cmd = NULL;
			/* nor is the duplicated part of the command */
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */
		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb, err);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	spin_lock_irqsave(&rxq->lock, flags);
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	} else
		list_add_tail(&rxb->list, &rxq->rx_used);
	spin_unlock_irqrestore(&rxq->lock, flags);
}
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 r, i;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

	/* calculate the total number of frames that need to be restocked
	 * after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		struct iwl_rx_mem_buffer *rxb;

		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;

		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
			     r, i, rxb);
		iwl_pcie_rx_handle_rb(trans, rxb);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl_pcie_rx_replenish_now(trans);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl_pcie_rx_replenish_now(trans);
	else
		iwl_pcie_rxq_restock(trans);
}
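
/*
 * Why the count/fill_rx batching above exists (explanatory note): when
 * more than half of the 256-entry queue is empty, buffers are handed
 * back to the hardware after every 8 processed packets instead of only
 * once at the end of the loop, so the firmware does not run out of
 * RBDs (and assert) during a long burst of received frames.
 */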
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	iwl_pcie_dump_csr(trans);
	iwl_pcie_dump_fh(trans, NULL);

	set_bit(STATUS_FW_ERROR, &trans_pcie->status);
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans_pcie->wait_command_queue);

	iwl_op_mode_nic_error(trans->op_mode);
}
void iwl_pcie_tasklet(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * these hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT,
		    trans_pcie->inta | ~trans_pcie->inta_mask);

	inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* just for debug */
		inta_mask = iwl_read32(trans, CSR_INT_MASK);
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, inta_mask);
	}
#endif

	/* We saved the interrupt in the inta variable; now we can reset
	 * trans_pcie->inta */
	trans_pcie->inta = 0;

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		bool hw_rfkill;

		hw_rfkill = iwl_is_rfkill_set(trans);
		IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
			 hw_rfkill ? "disable radio" : "enable radio");

		isr_stats->rfkill++;

		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
		if (hw_rfkill) {
			set_bit(STATUS_RFKILL, &trans_pcie->status);
			if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
					       &trans_pcie->status))
				IWL_DEBUG_RF_KILL(trans,
					"Rfkill while SYNC HCMD in flight\n");
			wake_up(&trans_pcie->wait_command_queue);
		} else {
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
			iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}
	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}

		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt before the shared data changes reflect it;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		iwl_pcie_rx_handle(trans);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;
	}
	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
		iwl_enable_interrupts(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
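
/*
 * Geometry check (illustrative): ICT_SHIFT = 12 gives ICT_SIZE = 4096
 * bytes -- exactly one device page -- and ICT_COUNT = 4096 / sizeof(u32)
 * = 1024 table entries. The table must therefore be naturally aligned
 * to 4 KB, which iwl_pcie_alloc_ict() verifies below.
 */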
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}
/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma,
				   GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
		      (unsigned long long)trans_pcie->ict_tbl_dma);

	IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

	/* reset table and index to all 0 */
	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
	trans_pcie->ict_index = 0;

	/* add periodic RX interrupt */
	trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}
/* Device is going up; inform it that it is using the ICT interrupt table,
 * and that the driver should start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	unsigned long flags;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	iwl_enable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/* Device is going down; disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	trans_pcie->use_ict = false;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta, inta_mask;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif

	lockdep_assert_held(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	if (inta & (~inta_mask)) {
		IWL_DEBUG_ISR(trans,
			      "We got a masked interrupt (0x%08x)...Ack and ignore\n",
			      inta & (~inta_mask));
		iwl_write32(trans, CSR_INT, inta & (~inta_mask));
		inta &= inta_mask;
	}

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		return IRQ_HANDLED;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	trans_pcie->inta |= inta;
	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only re-enable if disabled by irq and no tasklet was scheduled. */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	return IRQ_NONE;
}
/* Interrupt handler using the ICT table. With this handler the driver
 * stops using the INTA register to get the device's interrupts, since
 * reading that register is expensive. Instead, the device writes
 * interrupts into the ICT dram table and increments its index, then
 * fires the interrupt to the driver. The driver ORs all ICT table
 * entries from the current index up to the first entry with a 0 value;
 * the result is the interrupt that needs to be serviced. The driver
 * then sets the entries back to 0 and updates the index.
 */
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
	struct iwl_trans *trans = data;
	struct iwl_trans_pcie *trans_pcie;
	u32 inta, inta_mask;
	u32 val = 0;
	u32 read;
	unsigned long flags;

	if (!trans)
		return IRQ_NONE;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (unlikely(!trans_pcie->use_ict)) {
		irqreturn_t ret = iwl_pcie_isr(irq, data);
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
		return ret;
	}

	trace_iwlwifi_dev_irq(trans->dev);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(trans, CSR_INT_MASK);
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
		      inta, inta_mask, val);

	inta &= trans_pcie->inta_mask;
	trans_pcie->inta |= inta;

	/* iwl_pcie_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&trans_pcie->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
		 !trans_pcie->inta) {
		/* Interrupts were disabled by this handler, but no tasklet
		 * was scheduled to re-enable them, so re-enable them here;
		 * otherwise the tasklet would do it.
		 */
		iwl_enable_interrupts(trans);
	}

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
	    !trans_pcie->inta)
		iwl_enable_interrupts(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
	return IRQ_NONE;
}
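
/*
 * Worked example of the ICT-to-INTA remap in iwl_pcie_isr_ict() above
 * (illustrative only): each 32-bit ICT entry carries the interrupt
 * cause in its two low bytes, and
 *
 *	inta = (0xff & val) | ((0xff00 & val) << 16);
 *
 * keeps bits [7:0] in place while moving bits [15:8] up to bits
 * [31:24]. For val = 0x00008001, say, this yields inta = 0x80000001.
 * The "if (val & 0xC0000) val |= 0x8000;" fixup just before it
 * re-creates bit 15 (the Rx bit, bit 31 after the shift) from bits 18
 * and 19 when the hardware bug described there has cleared it.
 */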