iwl-rx.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-calib.h"
#include "iwl-helpers.h"
/************************** RX-FUNCTIONS ****************************/
/*
 * Rx theory of operation
 *
 * The driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These are
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is
 *   scheduled to replenish iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled), if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the
 *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index. If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 * ...
 *
 */
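/*
 * Illustrative sketch (editorial, not part of the driver): the empty/full
 * tests implied by the index scheme above, assuming the power-of-two
 * RX_QUEUE_SIZE and the RX_QUEUE_MASK wrap used throughout this file:
 *
 *	static inline int iwl_rxq_is_empty(const struct iwl_rx_queue *q)
 *	{
 *		// no good data: WRITE trails READ by exactly one slot
 *		return q->write == ((q->read - 1) & RX_QUEUE_MASK);
 *	}
 *
 *	static inline int iwl_rxq_is_full(const struct iwl_rx_queue *q)
 *	{
 *		// firmware has no free slot left to fill
 *		return q->write == q->read;
 *	}
 */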
/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;
	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_rx_queue_space);
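/*
 * Worked example (editorial): with q->read == 4 and q->write == 252,
 * s = 4 - 252 = -248; adding RX_QUEUE_SIZE (256) gives 8, and the
 * two-slot guard leaves 6 free slots. The guard is what keeps a full
 * queue (WRITE == READ) distinguishable from an empty one.
 */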
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	u32 reg = 0;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			goto exit_unlock;
		}

		ret = iwl_grab_nic_access(priv);
		if (ret)
			goto exit_unlock;

		/* Device expects a multiple of 8 */
		iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
				   q->write & ~0x7);
		iwl_release_nic_access(priv);

	/* Else device is assumed to be awake */
	} else
		/* Device expects a multiple of 8 */
		iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);

	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
	return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
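/*
 * Usage sketch (editorial): a caller marks the queue dirty under the
 * queue lock and then lets the helper above decide whether the device
 * is awake enough to accept the new write pointer:
 *
 *	spin_lock_irqsave(&rxq->lock, flags);
 *	rxq->need_update = 1;
 *	spin_unlock_irqrestore(&rxq->lock, flags);
 *	iwl_rx_queue_update_write_ptr(priv, rxq);
 *
 * This mirrors the pattern used by iwl_rx_queue_restock() below.
 */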
/**
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
					  dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
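/*
 * Note (editorial, not in the original source): the RBD stores the DMA
 * address shifted right by 8, so the hardware reconstructs the buffer
 * address as (rbd << 8); receive buffers must therefore start on a
 * 256-byte boundary. For example:
 *
 *	dma_addr_t addr = 0x12345600;                // 256-byte aligned
 *	__le32 rbd = cpu_to_le32((u32)(addr >> 8));  // 0x00123456
 *	// device reads the buffer at (0x123456 << 8) == 0x12345600
 */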
/**
 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
int iwl_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
	int write;
	int ret = 0;

	spin_lock_irqsave(&rxq->lock, flags);
	write = rxq->write & ~0x7;
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if ((write != (rxq->write & ~0x7))
	    || (abs(rxq->write - rxq->read) > 7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		ret = iwl_rx_queue_update_write_ptr(priv, rxq);
	}

	return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_restock);
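/*
 * Worked example (editorial) of the multiple-of-8 rule above: with
 * rxq->write == 13, the value handed to the device is 13 & ~0x7 == 8,
 * so the firmware only ever sees whole groups of eight restocked RBDs;
 * the remaining five slots are advertised once the write index crosses
 * the next multiple of 8 (or once the write/read gap exceeds 7, per the
 * second clause of the test above).
 */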
/**
 * iwl_rx_allocate - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * The Rx queue is then restocked via iwl_rx_queue_restock; see
 * iwl_rx_replenish below, which is called as a scheduled work item
 * (except for during initialization).
 */
void iwl_rx_allocate(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (!list_empty(&rxq->rx_used)) {
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);

		/* Alloc a new receive buffer */
		rxb->skb = alloc_skb(priv->hw_params.rx_buf_size,
				     __GFP_NOWARN | GFP_ATOMIC);
		if (!rxb->skb) {
			if (net_ratelimit())
				printk(KERN_CRIT DRV_NAME
				       ": Can not allocate SKB buffers\n");
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			break;
		}
		priv->alloc_rxb_skb++;
		list_del(element);

		/* Get physical address of RB/SKB */
		rxb->dma_addr =
			pci_map_single(priv->pci_dev, rxb->skb->data,
				       priv->hw_params.rx_buf_size,
				       PCI_DMA_FROMDEVICE);
		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_allocate);
void iwl_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl_rx_allocate(priv);

	spin_lock_irqsave(&priv->lock, flags);
	iwl_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_replenish);
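/*
 * Illustrative sketch (editorial; the wrapper name is hypothetical):
 * iwl_rx_replenish is normally run from the driver workqueue, so a
 * small work_struct handler bridges queue_work() to this function:
 *
 *	static void iwl_bg_rx_replenish(struct work_struct *data)
 *	{
 *		struct iwl_priv *priv =
 *			container_of(data, struct iwl_priv, rx_replenish);
 *		iwl_rx_replenish(priv);
 *	}
 *
 *	// during setup:
 *	INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
 *
 * This matches the queue_work(priv->workqueue, &priv->rx_replenish)
 * call in iwl_rx_queue_restock() above.
 */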
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 * This free routine walks the list of POOL entries and, if an SKB is still
 * attached (non-NULL), unmaps and frees it.
 */
void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;

	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev,
					 rxq->pool[i].dma_addr,
					 priv->hw_params.rx_buf_size,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rxq->pool[i].skb);
		}
	}

	pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			    rxq->dma_addr);
	rxq->bd = NULL;
}
EXPORT_SYMBOL(iwl_rx_queue_free);
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct pci_dev *dev = priv->pci_dev;
	int i;

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
	if (!rxq->bd)
		return -ENOMEM;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->free_count = 0;
	rxq->need_update = 0;
	return 0;
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);
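/*
 * Bring-up order sketch (editorial summary of the "Driver sequence"
 * described at the top of this file): the allocation above only builds
 * the descriptor ring and the empty pool; actual buffers arrive via
 * replenish, and the device is pointed at the ring by iwl_rx_init():
 *
 *	if (iwl_rx_queue_alloc(priv))          // ring + empty pool
 *		goto err;
 *	iwl_rx_replenish(priv);                // SKBs into rx_free, restock
 *	iwl_rx_init(priv, &priv->rxq);         // program FH registers
 *	iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
 *
 * Error handling and locking are elided here.
 */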
void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev,
					 rxq->pool[i].dma_addr,
					 priv->hw_params.rx_buf_size,
					 PCI_DMA_FROMDEVICE);
			priv->alloc_rxb_skb--;
			dev_kfree_skb(rxq->pool[i].skb);
			rxq->pool[i].skb = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_reset);
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int ret;
	unsigned long flags;
	unsigned int rb_size;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   rxq->dma_addr >> 8);

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   (priv->shared_phys + priv->rb_closed_offset) >> 4);

	/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   /* 0x10 << 4 | */
			   (RX_QUEUE_SIZE_LOG <<
			    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));

	/*
	 * iwl_write32(priv,CSR_INT_COAL_REG,0);
	 */

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
int iwl_rxq_stop(struct iwl_priv *priv)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (unlikely(ret)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	ret = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				  (1 << 24), 1000);
	if (ret < 0)
		IWL_ERROR("Can't stop Rx DMA.\n");

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL(iwl_rxq_stop);
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_RUN_TIME_CALIB
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl4965_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
		IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
		    le32_to_cpu(missed_beacon->consequtive_missed_beacons),
		    le32_to_cpu(missed_beacon->total_missed_becons),
		    le32_to_cpu(missed_beacon->num_recvd_beacons),
		    le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(STATUS_SCANNING, &priv->status))
			iwl_init_sensitivity(priv);
	}
#endif /* CONFIG_IWLWIFI_RUN_TIME_CALIB */
}
EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);