iwl-rx.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/************************** RX-FUNCTIONS ****************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list.  If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
 *   were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index.  If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 * ...
 *
 */

/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
        int s = q->read - q->write;
        if (s <= 0)
                s += RX_QUEUE_SIZE;
        /* keep some buffer to not confuse full and empty queue */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
EXPORT_SYMBOL(iwl_rx_queue_space);
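
/*
 * Illustrative sketch, not part of the driver: a worked example of the
 * wrap-around arithmetic in iwl_rx_queue_space() and of the two reserved
 * slots that keep a completely full ring distinguishable from an empty one
 * (see "Rx theory of operation" above).  The helper name and the sample
 * index values are hypothetical; only iwl_rx_queue_space(), struct
 * iwl_rx_queue and RX_QUEUE_SIZE come from the driver.
 */
#if 0	/* example only, never built */
static void example_rx_queue_space(void)
{
        struct iwl_rx_queue q = { .read = 0, .write = 0 };

        /* read == write: the whole ring minus the 2 reserved slots */
        WARN_ON(iwl_rx_queue_space(&q) != RX_QUEUE_SIZE - 2);

        /* write == read - 1: the raw result is negative, clamped to 0 */
        q.read = 10;
        q.write = 9;
        WARN_ON(iwl_rx_queue_space(&q) != 0);

        /* read numerically behind write: the difference wraps around */
        q.read = 10;
        q.write = 250;
        WARN_ON(iwl_rx_queue_space(&q) != (10 - 250 + RX_QUEUE_SIZE) - 2);
}
#endif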
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
        unsigned long flags;
        u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
        u32 reg;

        spin_lock_irqsave(&q->lock, flags);

        if (q->need_update == 0)
                goto exit_unlock;

        if (priv->cfg->base_params->shadow_reg_enable) {
                /* shadow register enabled */
                /* Device expects a multiple of 8 */
                q->write_actual = (q->write & ~0x7);
                iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
        } else {
                /* If power-saving is in use, make sure device is awake */
                if (test_bit(STATUS_POWER_PMI, &priv->status)) {
                        reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

                        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                                IWL_DEBUG_INFO(priv,
                                        "Rx queue requesting wakeup,"
                                        " GP1 = 0x%x\n", reg);
                                iwl_set_bit(priv, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                                goto exit_unlock;
                        }

                        q->write_actual = (q->write & ~0x7);
                        iwl_write_direct32(priv, rx_wrt_ptr_reg,
                                           q->write_actual);

                /* Else device is assumed to be awake */
                } else {
                        /* Device expects a multiple of 8 */
                        q->write_actual = (q->write & ~0x7);
                        iwl_write_direct32(priv, rx_wrt_ptr_reg,
                                           q->write_actual);
                }
        }
        q->need_update = 0;

 exit_unlock:
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
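
/*
 * Illustrative sketch, not part of the driver: the "multiple of 8" rounding
 * used above.  The device is only told about complete groups of 8 filled
 * RBDs, so the position reported in q->write_actual trails the driver's
 * real q->write index by up to 7 slots.  The helper name and the sample
 * values are hypothetical.
 */
#if 0	/* example only, never built */
static u32 example_write_actual(u32 write)
{
        return write & ~0x7;	/* e.g. 13 -> 8, 8 -> 8, 7 -> 0 */
}
#endif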
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
        struct iwl_rx_queue *rxq = &priv->rxq;
        struct device *dev = &priv->pci_dev->dev;
        int i;

        spin_lock_init(&rxq->lock);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
        rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
                                     GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;

        rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
                                          &rxq->rb_stts_dma, GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err_rb;

        /* Fill the rx_used queue with _all_ of the Rx buffers */
        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

        /* Set us so that we have processed and used all buffers, but have
         * not restocked the Rx queue with fresh buffers */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        rxq->free_count = 0;
        rxq->need_update = 0;
        return 0;

err_rb:
        dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
                          rxq->bd_dma);
err_bd:
        return -ENOMEM;
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);
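
/*
 * Illustrative sketch, not part of the driver: the sizes behind
 * iwl_rx_queue_alloc() above.  The descriptor ring is "4 * RX_QUEUE_SIZE"
 * bytes because each RBD is a single 32-bit word, and the buffer pool is
 * deliberately RX_FREE_BUFFERS entries larger than the ring so that spare
 * buffers are always available for replenishing.  The helper name is
 * hypothetical; rxq->bd and rxq->pool are the driver's own fields.
 */
#if 0	/* example only, never built */
static void example_rxq_sizes(struct iwl_rx_queue *rxq)
{
        /* one 32-bit descriptor per ring slot */
        BUILD_BUG_ON(sizeof(rxq->bd[0]) != 4);
        /* the pool holds RX_QUEUE_SIZE + RX_FREE_BUFFERS buffers */
        BUILD_BUG_ON(ARRAY_SIZE(rxq->pool) != RX_QUEUE_SIZE + RX_FREE_BUFFERS);
}
#endif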
void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
                                   struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);

        if (!report->state) {
                IWL_DEBUG_11H(priv,
                        "Spectrum Measure Notification: Start\n");
                return;
        }

        memcpy(&priv->measure_report, report, sizeof(*report));
        priv->measurement_status |= MEASUREMENT_READY;
}
EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
void iwl_recover_from_statistics(struct iwl_priv *priv,
                                 struct iwl_rx_packet *pkt)
{
        if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
            !iwl_is_any_associated(priv))
                return;

        if (priv->cfg->ops->lib->check_ack_health &&
            !priv->cfg->ops->lib->check_ack_health(priv, pkt)) {
                IWL_ERR(priv, "low ack count detected, restart firmware\n");
                /* if the firmware reset was carried out, don't also force
                 * an RF reset from the PLCP check below */
                if (!iwl_force_reset(priv, IWL_FW_RESET, false))
                        return;
        }

        if (priv->cfg->ops->lib->check_plcp_health &&
            !priv->cfg->ops->lib->check_plcp_health(priv, pkt))
                iwl_force_reset(priv, IWL_RF_RESET, false);
}
EXPORT_SYMBOL(iwl_recover_from_statistics);
/*
 * returns non-zero if packet should be dropped
 */
int iwl_set_decrypted_flag(struct iwl_priv *priv,
                           struct ieee80211_hdr *hdr,
                           u32 decrypt_res,
                           struct ieee80211_rx_status *stats)
{
        u16 fc = le16_to_cpu(hdr->frame_control);

        /*
         * All contexts have the same setting here due to it being
         * a module parameter, so OK to check any context.
         */
        if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
                                                RXON_FILTER_DIS_DECRYPT_MSK)
                return 0;

        if (!(fc & IEEE80211_FCTL_PROTECTED))
                return 0;

        IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
        switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
        case RX_RES_STATUS_SEC_TYPE_TKIP:
                /* The uCode has got a bad phase 1 Key, pushes the packet.
                 * Decryption will be done in SW. */
                if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
                    RX_RES_STATUS_BAD_KEY_TTAK)
                        break;
                /* fall through if TTAK OK */

        case RX_RES_STATUS_SEC_TYPE_WEP:
                if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
                    RX_RES_STATUS_BAD_ICV_MIC) {
                        /* bad ICV, the packet is destroyed since the
                         * decryption is inplace, drop it */
                        IWL_DEBUG_RX(priv, "Packet destroyed\n");
                        return -1;
                }
                /* fall through */

        case RX_RES_STATUS_SEC_TYPE_CCMP:
                if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
                    RX_RES_STATUS_DECRYPT_OK) {
                        IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
                        stats->flag |= RX_FLAG_DECRYPTED;
                }
                break;

        default:
                break;
        }
        return 0;
}
EXPORT_SYMBOL(iwl_set_decrypted_flag);