/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index. If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 * ...
 *
 */
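/*
 * Illustrative only (not part of the driver logic): with RX_QUEUE_SIZE = 256,
 * READ = 10 and WRITE = 9 means the queue is empty (WRITE = READ - 1), while
 * READ = 10 and WRITE = 10 means it is full. One slot is deliberately kept
 * unusable so that the full and empty states remain distinguishable.
 */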
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *q)
{
        int s = q->read - q->write;

        if (s <= 0)
                s += RX_QUEUE_SIZE;
        /* keep some buffer to not confuse full and empty queue */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
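/*
 * Worked example (illustrative only): read = 5, write = 3 gives
 * s = 2 - 2 = 0 free slots, while read = 5, write = 250 gives
 * s = (5 - 250) + 256 - 2 = 9 free slots. The margin of 2 keeps a
 * full queue from looking identical to an empty one.
 */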
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
        return cpu_to_le32((u32)(dma_addr >> 8));
}
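/*
 * Illustrative only: the device takes RBD pointers as addr >> 8, so a 36-bit
 * DMA address such as 0xf12345600 is stored as 0x0f123456, which fits in 32
 * bits. This works because receive buffers must be 256-byte aligned (the low
 * 8 bits are always zero) and must lie below 2^36 -- both invariants are
 * asserted with BUG_ON in iwl_pcie_rxq_alloc_rbs below.
 */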
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
                                   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
{
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&q->lock, flags);

        if (q->need_update == 0)
                goto exit_unlock;

        if (trans->cfg->base_params->shadow_reg_enable) {
                /* shadow register enabled */
                /* Device expects a multiple of 8 */
                q->write_actual = (q->write & ~0x7);
                iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
        } else {
                struct iwl_trans_pcie *trans_pcie =
                        IWL_TRANS_GET_PCIE_TRANS(trans);

                /* If power-saving is in use, make sure device is awake */
                if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
                        reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

                        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                                IWL_DEBUG_INFO(trans,
                                               "Rx queue requesting wakeup,"
                                               " GP1 = 0x%x\n", reg);
                                iwl_set_bit(trans, CSR_GP_CNTRL,
                                            CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                                goto exit_unlock;
                        }

                        q->write_actual = (q->write & ~0x7);
                        iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                                           q->write_actual);

                /* Else device is assumed to be awake */
                } else {
                        /* Device expects a multiple of 8 */
                        q->write_actual = (q->write & ~0x7);
                        iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                                           q->write_actual);
                }
        }
        q->need_update = 0;

exit_unlock:
        spin_unlock_irqrestore(&q->lock, flags);
}
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        unsigned long flags;

        /*
         * If the device isn't enabled - there's no need to try to add
         * buffers... This can happen when we stop the device and still have
         * an interrupt pending. We stop the APM before we sync the
         * interrupts / tasklets because we have to (see comment there). On
         * the other hand, since the APM is stopped, we cannot access the HW
         * (in particular not prph). So don't try to restock if the APM has
         * already been stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
                return;

        spin_lock_irqsave(&rxq->lock, flags);
        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
                /* The overwritten rxb must be a used one */
                rxb = rxq->queue[rxq->write];
                BUG_ON(rxb && rxb->page);

                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);

                /* Point to Rx buffer via next RBD in circular buffer */
                rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
        }
        spin_unlock_irqrestore(&rxq->lock, flags);
        /* If the pre-allocated buffer pool is dropping low, schedule to
         * refill it */
        if (rxq->free_count <= RX_LOW_WATERMARK)
                schedule_work(&trans_pcie->rx_replenish);

        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock_irqsave(&rxq->lock, flags);
                rxq->need_update = 1;
                spin_unlock_irqrestore(&rxq->lock, flags);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        }
}
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
        unsigned long flags;
        gfp_t gfp_mask = priority;

        while (1) {
                spin_lock_irqsave(&rxq->lock, flags);
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&rxq->lock, flags);

                if (rxq->free_count > RX_LOW_WATERMARK)
                        gfp_mask |= __GFP_NOWARN;

                if (trans_pcie->rx_page_order > 0)
                        gfp_mask |= __GFP_COMP;

                /* Alloc a new receive buffer */
                page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
                if (!page) {
                        if (net_ratelimit())
                                IWL_DEBUG_INFO(trans, "alloc_pages failed, "
                                               "order: %d\n",
                                               trans_pcie->rx_page_order);

                        if ((rxq->free_count <= RX_LOW_WATERMARK) &&
                            net_ratelimit())
                                IWL_CRIT(trans, "Failed to alloc_pages with %s. "
                                         "Only %u free buffers remaining.\n",
                                         priority == GFP_ATOMIC ?
                                         "GFP_ATOMIC" : "GFP_KERNEL",
                                         rxq->free_count);
                        /* We don't reschedule replenish work here -- we will
                         * call the restock method and if it still needs
                         * more buffers it will schedule replenish */
                        return;
                }

                spin_lock_irqsave(&rxq->lock, flags);

                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                spin_unlock_irqrestore(&rxq->lock, flags);

                BUG_ON(rxb->page);
                rxb->page = page;
                /* Get physical address of the RB */
                rxb->page_dma =
                        dma_map_page(trans->dev, page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        rxb->page = NULL;
                        spin_lock_irqsave(&rxq->lock, flags);
                        list_add(&rxb->list, &rxq->rx_used);
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                /* dma address must be no more than 36 bits */
                BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
                /* and also 256 byte aligned! */
                BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

                spin_lock_irqsave(&rxq->lock, flags);

                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;

                spin_unlock_irqrestore(&rxq->lock, flags);
        }
}
static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        int i;

        /* Fill the rx_used queue with _all_ of the Rx buffers */
        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                /* In the reset function, these buffers may have been allocated
                 * to an SKB, so we need to unmap and free potential storage */
                if (rxq->pool[i].page != NULL) {
                        dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
                                       PAGE_SIZE << trans_pcie->rx_page_order,
                                       DMA_FROM_DEVICE);
                        __free_pages(rxq->pool[i].page,
                                     trans_pcie->rx_page_order);
                        rxq->pool[i].page = NULL;
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
        }
}
/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_pcie_rxq_restock(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
        iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

        iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
        struct iwl_trans_pcie *trans_pcie =
                container_of(data, struct iwl_trans_pcie, rx_replenish);

        iwl_pcie_rx_replenish(trans_pcie->trans);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct device *dev = trans->dev;

        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

        spin_lock_init(&rxq->lock);

        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;

        /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
        rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                                      &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;

        /* Allocate the driver's pointer to receive buffer status */
        rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
                                           &rxq->rb_stts_dma, GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err_rb_stts;

        return 0;

err_rb_stts:
        dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;
err_bd:
        return -ENOMEM;
}
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 rb_size;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

        if (trans_pcie->rx_buf_size_8k)
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
        else
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

        /* Stop Rx DMA */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

        /* Reset driver's Rx queue write index */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                           (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                           rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                           FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                           FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                           FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                           rb_size |
                           (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                           (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        int i, err;
        unsigned long flags;

        if (!rxq->bd) {
                err = iwl_pcie_rx_alloc(trans);
                if (err)
                        return err;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        INIT_WORK(&trans_pcie->rx_replenish,
                  iwl_pcie_rx_replenish_work);

        iwl_pcie_rxq_free_rbs(trans);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                rxq->queue[i] = NULL;

        /* Set us so that we have processed and used all buffers, but have
         * not restocked the Rx queue with fresh buffers */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        rxq->free_count = 0;
        spin_unlock_irqrestore(&rxq->lock, flags);

        iwl_pcie_rx_replenish(trans);

        iwl_pcie_rx_hw_init(trans, rxq);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        rxq->need_update = 1;
        iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

        return 0;
}
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        unsigned long flags;

        /* if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
        if (!rxq->bd) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        spin_lock_irqsave(&rxq->lock, flags);
        iwl_pcie_rxq_free_rbs(trans);
        spin_unlock_irqrestore(&rxq->lock, flags);

        dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
        rxq->bd = NULL;

        if (rxq->rb_stts)
                dma_free_coherent(trans->dev,
                                  sizeof(struct iwl_rb_status),
                                  rxq->rb_stts, rxq->rb_stts_dma);
        else
                IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
        memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
        rxq->rb_stts = NULL;
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                  struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
        unsigned long flags;
        bool page_stolen = false;
        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
        u32 offset = 0;

        if (WARN_ON(!rxb))
                return;

        dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

        while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
                struct iwl_rx_packet *pkt;
                struct iwl_device_cmd *cmd;
                u16 sequence;
                bool reclaim;
                int index, cmd_index, err, len;
                struct iwl_rx_cmd_buffer rxcb = {
                        ._offset = offset,
                        ._page = rxb->page,
                        ._page_stolen = false,
                        .truesize = max_len,
                };

                pkt = rxb_addr(&rxcb);

                if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
                        break;

                IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
                             rxcb._offset,
                             get_cmd_string(trans_pcie, pkt->hdr.cmd),
                             pkt->hdr.cmd);

                len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
                len += sizeof(u32); /* account for status word */
                trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
                trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

                /* Reclaim a command buffer only if this packet is a response
                 *   to a (driver-originated) command.
                 * If the packet (e.g. Rx frame) originated from uCode,
                 *   there is no command buffer to reclaim.
                 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
                 *   but apparently a few don't get set; catch them here. */
                reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
                if (reclaim) {
                        int i;

                        for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
                                if (trans_pcie->no_reclaim_cmds[i] ==
                                    pkt->hdr.cmd) {
                                        reclaim = false;
                                        break;
                                }
                        }
                }

                sequence = le16_to_cpu(pkt->hdr.sequence);
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);

                if (reclaim) {
                        struct iwl_pcie_txq_entry *ent;

                        ent = &txq->entries[cmd_index];
                        cmd = ent->copy_cmd;
                        WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
                } else {
                        cmd = NULL;
                }

                err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

                if (reclaim) {
                        /* The original command isn't needed any more */
                        kfree(txq->entries[cmd_index].copy_cmd);
                        txq->entries[cmd_index].copy_cmd = NULL;
                        /* nor is the duplicated part of the command */
                        kfree(txq->entries[cmd_index].free_buf);
                        txq->entries[cmd_index].free_buf = NULL;
                }

                /*
                 * After here, we should always check rxcb._page_stolen,
                 * if it is true then one of the handlers took the page.
                 */
                if (reclaim) {
                        /* Invoke any callbacks, transfer the buffer to caller,
                         * and fire off the (possibly) blocking
                         * iwl_trans_send_cmd()
                         * as we reclaim the driver command queue */
                        if (!rxcb._page_stolen)
                                iwl_pcie_hcmd_complete(trans, &rxcb, err);
                        else
                                IWL_WARN(trans, "Claim null rxb?\n");
                }

                page_stolen |= rxcb._page_stolen;
                offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
        }

        /* page was stolen from us -- free our reference */
        if (page_stolen) {
                __free_pages(rxb->page, trans_pcie->rx_page_order);
                rxb->page = NULL;
        }

        /* Reuse the page if possible. For notification packets and
         * SKBs that fail to Rx correctly, add them back into the
         * rx_free list for reuse later. */
        spin_lock_irqsave(&rxq->lock, flags);
        if (rxb->page != NULL) {
                rxb->page_dma =
                        dma_map_page(trans->dev, rxb->page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        /*
                         * free the page(s) as well to not break
                         * the invariant that the items on the used
                         * list have no page(s)
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
                        list_add_tail(&rxb->list, &rxq->rx_used);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
                list_add_tail(&rxb->list, &rxq->rx_used);
        spin_unlock_irqrestore(&rxq->lock, flags);
}
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        u32 r, i;
        u8 fill_rx = 0;
        u32 count = 8;
        int total_empty;

        /* uCode's read index (stored in shared DRAM) indicates the last Rx
         * buffer that the driver may process (last buffer filled by ucode). */
        r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
        i = rxq->read;

        /* Rx interrupt, but nothing sent from uCode */
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

        /* calculate the total frames that need to be restocked after
         * handling RX */
        total_empty = r - rxq->write_actual;
        if (total_empty < 0)
                total_empty += RX_QUEUE_SIZE;

        if (total_empty > (RX_QUEUE_SIZE / 2))
                fill_rx = 1;

        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;

                rxb = rxq->queue[i];
                rxq->queue[i] = NULL;

                IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
                             r, i, rxb);
                iwl_pcie_rx_handle_rb(trans, rxb);

                i = (i + 1) & RX_QUEUE_MASK;
                /* If there are a lot of unused frames,
                 * restock the Rx queue so ucode won't assert. */
                if (fill_rx) {
                        count++;
                        if (count >= 8) {
                                rxq->read = i;
                                iwl_pcie_rx_replenish_now(trans);
                                count = 0;
                        }
                }
        }

        /* Backtrack one entry */
        rxq->read = i;
        if (fill_rx)
                iwl_pcie_rx_replenish_now(trans);
        else
                iwl_pcie_rxq_restock(trans);
}
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
        if (trans->cfg->internal_wimax_coex &&
            (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
               APMS_CLK_VAL_MRB_FUNC_MODE) ||
             (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
              APMG_PS_CTRL_VAL_RESET_REQ))) {
                clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
                iwl_op_mode_wimax_active(trans->op_mode);
                wake_up(&trans_pcie->wait_command_queue);
                return;
        }

        iwl_pcie_dump_csr(trans);
        iwl_pcie_dump_fh(trans, NULL);

        set_bit(STATUS_FW_ERROR, &trans_pcie->status);
        clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
        wake_up(&trans_pcie->wait_command_queue);

        iwl_op_mode_nic_error(trans->op_mode);
}
void iwl_pcie_tasklet(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta = 0;
        u32 handled = 0;
        unsigned long flags;
        u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
        u32 inta_mask;
#endif

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);

        /* Ack/clear/reset pending uCode interrupts.
         * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
         */
        /* There is a hardware bug in the interrupt mask function that some
         * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
         * they are disabled in the CSR_INT_MASK register. Furthermore the
         * ICT interrupt handling mechanism has another bug that might cause
         * these unmasked interrupts to fail to be detected. We work around
         * the hardware bugs here by ACKing all the possible interrupts so
         * that interrupt coalescing can still be achieved.
         */
        iwl_write32(trans, CSR_INT,
                    trans_pcie->inta | ~trans_pcie->inta_mask);

        inta = trans_pcie->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(IWL_DL_ISR)) {
                /* just for debug */
                inta_mask = iwl_read32(trans, CSR_INT_MASK);
                IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
                              inta, inta_mask);
        }
#endif

        /* saved interrupt in inta variable now we can reset trans_pcie->inta */
        trans_pcie->inta = 0;

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

        /* Now service all interrupt bits discovered above. */
        if (inta & CSR_INT_BIT_HW_ERR) {
                IWL_ERR(trans, "Hardware error detected. Restarting.\n");

                /* Tell the device to stop sending interrupts */
                iwl_disable_interrupts(trans);

                isr_stats->hw++;
                iwl_pcie_irq_handle_error(trans);

                handled |= CSR_INT_BIT_HW_ERR;

                return;
        }

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(IWL_DL_ISR)) {
                /* NIC fires this, but we don't use it, redundant with WAKEUP */
                if (inta & CSR_INT_BIT_SCD) {
                        IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
                                      "the frame/frames.\n");
                        isr_stats->sch++;
                }

                /* Alive notification via Rx interrupt will do the real work */
                if (inta & CSR_INT_BIT_ALIVE) {
                        IWL_DEBUG_ISR(trans, "Alive interrupt\n");
                        isr_stats->alive++;
                }
        }
#endif
        /* Safely ignore these bits for debug checks below */
        inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

        /* HW RF KILL switch toggled */
        if (inta & CSR_INT_BIT_RF_KILL) {
                bool hw_rfkill;

                hw_rfkill = iwl_is_rfkill_set(trans);
                IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
                         hw_rfkill ? "disable radio" : "enable radio");

                isr_stats->rfkill++;

                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans_pcie->status);
                        if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
                                               &trans_pcie->status))
                                IWL_DEBUG_RF_KILL(trans,
                                                  "Rfkill while SYNC HCMD in flight\n");
                        wake_up(&trans_pcie->wait_command_queue);
                } else {
                        clear_bit(STATUS_RFKILL, &trans_pcie->status);
                }

                handled |= CSR_INT_BIT_RF_KILL;
        }

        /* Chip got too hot and stopped itself */
        if (inta & CSR_INT_BIT_CT_KILL) {
                IWL_ERR(trans, "Microcode CT kill error detected.\n");
                isr_stats->ctkill++;
                handled |= CSR_INT_BIT_CT_KILL;
        }

        /* Error detected by uCode */
        if (inta & CSR_INT_BIT_SW_ERR) {
                IWL_ERR(trans, "Microcode SW error detected. "
                        "Restarting 0x%X.\n", inta);
                isr_stats->sw++;
                iwl_pcie_irq_handle_error(trans);
                handled |= CSR_INT_BIT_SW_ERR;
        }

        /* uCode wakes up after power-down sleep */
        if (inta & CSR_INT_BIT_WAKEUP) {
                IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
                iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
                for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
                        iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

                isr_stats->wakeup++;

                handled |= CSR_INT_BIT_WAKEUP;
        }

        /* All uCode command responses, including Tx command responses,
         * Rx "responses" (frame-received notification), and other
         * notifications from uCode come through here. */
        if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
                    CSR_INT_BIT_RX_PERIODIC)) {
                IWL_DEBUG_ISR(trans, "Rx interrupt\n");
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
                        handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
                        iwl_write32(trans, CSR_FH_INT_STATUS,
                                    CSR_FH_INT_RX_MASK);
                }
                if (inta & CSR_INT_BIT_RX_PERIODIC) {
                        handled |= CSR_INT_BIT_RX_PERIODIC;
                        iwl_write32(trans,
                                    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
                }
                /* Sending an RX interrupt requires many steps to be done in
                 * the device:
                 * 1- write interrupt to current index in ICT table.
                 * 2- dma RX frame.
                 * 3- update RX shared data to indicate last write index.
                 * 4- send interrupt.
                 * This could lead to an RX race: the driver could receive the
                 * RX interrupt while the shared data changes do not reflect
                 * this yet; the periodic interrupt will detect any dangling
                 * Rx activity.
                 */

                /* Disable periodic interrupt; we use it as just a one-shot. */
                iwl_write8(trans, CSR_INT_PERIODIC_REG,
                           CSR_INT_PERIODIC_DIS);

                iwl_pcie_rx_handle(trans);

                /*
                 * Enable periodic interrupt in 8 msec only if we received
                 * real RX interrupt (instead of just periodic int), to catch
                 * any dangling Rx interrupt. If it was just the periodic
                 * interrupt, there was no dangling Rx activity, and no need
                 * to extend the periodic interrupt; one-shot is enough.
                 */
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
                        iwl_write8(trans, CSR_INT_PERIODIC_REG,
                                   CSR_INT_PERIODIC_ENA);

                isr_stats->rx++;
        }

        /* This "Tx" DMA channel is used only for loading uCode */
        if (inta & CSR_INT_BIT_FH_TX) {
                iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
                IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
                isr_stats->tx++;
                handled |= CSR_INT_BIT_FH_TX;
                /* Wake up uCode load routine, now that load is complete */
                trans_pcie->ucode_write_complete = true;
                wake_up(&trans_pcie->ucode_write_waitq);
        }

        if (inta & ~handled) {
                IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
                isr_stats->unhandled++;
        }

        if (inta & ~(trans_pcie->inta_mask)) {
                IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
                         inta & ~trans_pcie->inta_mask);
        }

        /* Re-enable all interrupts */
        /* only re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
                iwl_enable_interrupts(trans);
        /* Re-enable RF_KILL if it occurred */
        else if (handled & CSR_INT_BIT_RF_KILL)
                iwl_enable_rfkill_int(trans);
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
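/*
 * Illustrative only: with ICT_SHIFT = 12 the table is ICT_SIZE = 4096 bytes,
 * i.e. ICT_COUNT = 4096 / 4 = 1024 32-bit entries -- exactly one device page.
 * The >> ICT_SHIFT in iwl_pcie_reset_ict below is valid because the table's
 * DMA address is page aligned (checked in iwl_pcie_alloc_ict).
 */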
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans_pcie->ict_tbl) {
                dma_free_coherent(trans->dev, ICT_SIZE,
                                  trans_pcie->ict_tbl,
                                  trans_pcie->ict_tbl_dma);
                trans_pcie->ict_tbl = NULL;
                trans_pcie->ict_tbl_dma = 0;
        }
}
/*
 * allocate dram shared table, it is an aligned memory
 * block of ICT_SIZE.
 * also reset all data related to ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        trans_pcie->ict_tbl =
                dma_alloc_coherent(trans->dev, ICT_SIZE,
                                   &trans_pcie->ict_tbl_dma,
                                   GFP_KERNEL);
        if (!trans_pcie->ict_tbl)
                return -ENOMEM;

        /* just an API sanity check ... it is guaranteed to be aligned */
        if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
                iwl_pcie_free_ict(trans);
                return -EINVAL;
        }

        IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
                      (unsigned long long)trans_pcie->ict_tbl_dma);

        IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

        /* reset table and index to all 0 */
        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
        trans_pcie->ict_index = 0;

        /* add periodic RX interrupt */
        trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
        return 0;
}
/* Device is going up; inform it that it is using the ICT interrupt table,
 * and tell the driver to start using the ICT interrupt.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 val;
        unsigned long flags;

        if (!trans_pcie->ict_tbl)
                return;

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_disable_interrupts(trans);

        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

        val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

        val |= CSR_DRAM_INT_TBL_ENABLE;
        val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

        IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

        iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
        trans_pcie->use_ict = true;
        trans_pcie->ict_index = 0;
        iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
        iwl_enable_interrupts(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/* Device is going down; disable ict interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        trans_pcie->use_ict = false;
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 inta, inta_mask;
#ifdef CONFIG_IWLWIFI_DEBUG
        u32 inta_fh;
#endif

        lockdep_assert_held(&trans_pcie->irq_lock);

        trace_iwlwifi_dev_irq(trans->dev);

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the tasklet will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here. */
        inta_mask = iwl_read32(trans, CSR_INT_MASK);
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* Discover which interrupts are active/pending */
        inta = iwl_read32(trans, CSR_INT);

        if (inta & (~inta_mask)) {
                IWL_DEBUG_ISR(trans,
                              "We got a masked interrupt (0x%08x)...Ack and ignore\n",
                              inta & (~inta_mask));
                iwl_write32(trans, CSR_INT, inta & (~inta_mask));
                inta &= inta_mask;
        }

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        if (!inta) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
                /* Hardware disappeared. It might have already raised
                 * an interrupt */
                IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
                return IRQ_HANDLED;
        }

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_have_debug_level(IWL_DL_ISR)) {
                inta_fh = iwl_read32(trans, CSR_FH_INT_STATUS);
                IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
                              "fh 0x%08x\n", inta, inta_mask, inta_fh);
        }
#endif

        trans_pcie->inta |= inta;
        /* iwl_pcie_tasklet() will service interrupts and re-enable them */
        if (likely(inta))
                tasklet_schedule(&trans_pcie->irq_tasklet);
        else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                 !trans_pcie->inta)
                iwl_enable_interrupts(trans);

        return IRQ_HANDLED;

none:
        /* re-enable interrupts here since we don't have anything to service. */
        /* only re-enable if disabled by irq and no tasklet was scheduled. */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
            !trans_pcie->inta)
                iwl_enable_interrupts(trans);

        return IRQ_NONE;
}
/* Interrupt handler using the ICT table. With this handler the driver stops
 * reading the INTA register to discover the device's interrupts, since
 * reading that register is expensive. Instead, the device writes interrupt
 * values into the ICT DRAM table, increments its index, and then fires the
 * interrupt. The driver ORs all ICT table entries from the current index up
 * to the first entry with a 0 value; the result is the interrupt to service.
 * The driver then sets the consumed entries back to 0 and updates the index.
 */
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie;
        u32 inta, inta_mask;
        u32 val = 0;
        u32 read;
        unsigned long flags;

        if (!trans)
                return IRQ_NONE;

        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);

        /* dram interrupt table not set yet,
         * use legacy interrupt.
         */
        if (unlikely(!trans_pcie->use_ict)) {
                irqreturn_t ret = iwl_pcie_isr(irq, data);
                spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
                return ret;
        }

        trace_iwlwifi_dev_irq(trans->dev);

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the tasklet will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here.
         */
        inta_mask = iwl_read32(trans, CSR_INT_MASK);
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
        trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
        if (!read) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        /*
         * Collect all entries up to the first 0, starting from ict_index;
         * note we already read at ict_index.
         */
        do {
                val |= read;
                IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
                              trans_pcie->ict_index, read);
                trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
                trans_pcie->ict_index =
                        iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

                read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
                trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
                                           read);
        } while (read);

        /* We should not get this value, just ignore it. */
        if (val == 0xffffffff)
                val = 0;

        /*
         * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
         * (bit 15 before shifting it to 31) to clear when using interrupt
         * coalescing. fortunately, bits 18 and 19 stay set when this happens
         * so we use them to decide on the real state of the Rx bit.
         * In other words, bit 15 is set if bit 18 or bit 19 are set.
         */
        if (val & 0xC0000)
                val |= 0x8000;

        inta = (0xff & val) | ((0xff00 & val) << 16);
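        /*
         * Illustrative only: an ICT entry packs CSR_INT bits 0-7 into its low
         * byte and CSR_INT bits 24-31 into its second byte, so val = 0x8042
         * expands to inta = 0x80000042 -- bit 15 of val becomes bit 31 of the
         * reconstructed CSR_INT value.
         */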
        IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
                      inta, inta_mask, val);

        inta &= trans_pcie->inta_mask;
        trans_pcie->inta |= inta;

        /* iwl_pcie_tasklet() will service interrupts and re-enable them */
        if (likely(inta))
                tasklet_schedule(&trans_pcie->irq_tasklet);
        else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                 !trans_pcie->inta) {
                /* Interrupts were disabled by this handler but no tasklet
                 * was scheduled, so nothing else will re-enable them; do it
                 * here. When a tasklet is scheduled, it re-enables them
                 * itself.
                 */
                iwl_enable_interrupts(trans);
        }

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        return IRQ_HANDLED;

none:
        /* re-enable interrupts here since we don't have anything to service.
         * only re-enable if disabled by irq.
         */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
            !trans_pcie->inta)
                iwl_enable_interrupts(trans);

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        return IRQ_NONE;
}