iwl-rx.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-calib.h"
#include "iwl-helpers.h"

/************************** RX-FUNCTIONS ****************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ (see the illustrative sketch after this comment).
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled), if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index. If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 * ...
 *
 */
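
/*
 * Illustrative sketch (not part of the driver): the empty/full rules above,
 * restated as plain index arithmetic. The helper names are hypothetical and
 * the modulo form is an assumption made for clarity; the driver itself works
 * with the READ/WRITE index registers directly.
 */
static inline bool example_rxq_is_empty(u32 read, u32 write)
{
        /* empty: WRITE sits one slot behind READ, with wrap-around */
        return write == (read + RX_QUEUE_SIZE - 1) % RX_QUEUE_SIZE;
}

static inline bool example_rxq_is_full(u32 read, u32 write)
{
        /* full: WRITE has caught up with READ */
        return write == read;
}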

/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
        int s = q->read - q->write;
        if (s <= 0)
                s += RX_QUEUE_SIZE;
        /* keep some buffer to not confuse full and empty queue */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}
EXPORT_SYMBOL(iwl_rx_queue_space);

/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
        unsigned long flags;
        u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
        u32 reg;

        spin_lock_irqsave(&q->lock, flags);

        if (q->need_update == 0)
                goto exit_unlock;

        /* If power-saving is in use, make sure device is awake */
        if (test_bit(STATUS_POWER_PMI, &priv->status)) {
                reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

                if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                        IWL_DEBUG_INFO(priv,
                                "Rx queue requesting wakeup, GP1 = 0x%x\n",
                                reg);
                        iwl_set_bit(priv, CSR_GP_CNTRL,
                                    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                        goto exit_unlock;
                }

                q->write_actual = (q->write & ~0x7);
                iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);

        /* Else device is assumed to be awake */
        } else {
                /* Device expects a multiple of 8 */
                q->write_actual = (q->write & ~0x7);
                iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual);
        }

        q->need_update = 0;

exit_unlock:
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
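
/*
 * Worked example (illustrative): the device only accepts write pointers that
 * are multiples of 8, hence the "& ~0x7" above. With q->write = 37, the value
 * written to the register is 37 & ~0x7 = 32, so the device's view of the
 * write pointer advances in steps of 8 buffers.
 */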

int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
        struct iwl_rx_queue *rxq = &priv->rxq;
        struct device *dev = &priv->pci_dev->dev;
        int i;

        spin_lock_init(&rxq->lock);
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);

        /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
        rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
                                     GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;

        rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
                                          &rxq->rb_stts_dma, GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err_rb;

        /* Fill the rx_used queue with _all_ of the Rx buffers */
        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

        /* Set us so that we have processed and used all buffers, but have
         * not restocked the Rx queue with fresh buffers */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        rxq->free_count = 0;
        rxq->need_update = 0;
        return 0;

err_rb:
        dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
                          rxq->dma_addr);
err_bd:
        return -ENOMEM;
}
EXPORT_SYMBOL(iwl_rx_queue_alloc);
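
/*
 * Background note (added for clarity, hedged): the descriptor ring above is
 * sized as 4 * RX_QUEUE_SIZE because each receive buffer descriptor in this
 * ring is a single 4-byte entry, while rxq->rb_stts appears to be a separate
 * DMA-coherent status area that the device updates as it consumes buffers.
 */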

void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
                                struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_missed_beacon_notif *missed_beacon;

        missed_beacon = &pkt->u.missed_beacon;
        if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
            priv->missed_beacon_threshold) {
                IWL_DEBUG_CALIB(priv,
                    "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
                    le32_to_cpu(missed_beacon->consecutive_missed_beacons),
                    le32_to_cpu(missed_beacon->total_missed_becons),
                    le32_to_cpu(missed_beacon->num_recvd_beacons),
                    le32_to_cpu(missed_beacon->num_expected_beacons));
                if (!test_bit(STATUS_SCANNING, &priv->status))
                        iwl_init_sensitivity(priv);
        }
}
EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);

void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
                                   struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);

        if (!report->state) {
                IWL_DEBUG_11H(priv,
                        "Spectrum Measure Notification: Start\n");
                return;
        }

        memcpy(&priv->measure_report, report, sizeof(*report));
        priv->measurement_status |= MEASUREMENT_READY;
}
EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);

/* Calculate noise level, based on measurements during network silence just
 * before arriving beacon. This measurement can be done only if we know
 * exactly when to expect beacons, therefore only when we're associated. */
static void iwl_rx_calc_noise(struct iwl_priv *priv)
{
        struct statistics_rx_non_phy *rx_info
                                = &(priv->statistics.rx.general);
        int num_active_rx = 0;
        int total_silence = 0;

        int bcn_silence_a =
                le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
        int bcn_silence_b =
                le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
        int bcn_silence_c =
                le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
        int last_rx_noise;

        if (bcn_silence_a) {
                total_silence += bcn_silence_a;
                num_active_rx++;
        }
        if (bcn_silence_b) {
                total_silence += bcn_silence_b;
                num_active_rx++;
        }
        if (bcn_silence_c) {
                total_silence += bcn_silence_c;
                num_active_rx++;
        }

        /* Average among active antennas */
        if (num_active_rx)
                last_rx_noise = (total_silence / num_active_rx) - 107;
        else
                last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;

        IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
                        bcn_silence_a, bcn_silence_b, bcn_silence_c,
                        last_rx_noise);
}
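
/*
 * Worked example (illustrative, assumed values): with beacon-silence readings
 * of 30 and 36 on two active antennas and 0 on the third, the average is
 * (30 + 36) / 2 = 33, so the reported noise floor is 33 - 107 = -74 dBm.
 */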

#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * Based on the assumption that all statistics counters are DWORDs.
 * FIXME: this function is for debugging only and does not handle
 * counter roll-over.
 */
static void iwl_accumulative_statistics(struct iwl_priv *priv,
                                        __le32 *stats)
{
        int i;
        __le32 *prev_stats;
        u32 *accum_stats;
        u32 *delta, *max_delta;

        prev_stats = (__le32 *)&priv->statistics;
        accum_stats = (u32 *)&priv->accum_statistics;
        delta = (u32 *)&priv->delta_statistics;
        max_delta = (u32 *)&priv->max_delta;

        for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
             i += sizeof(__le32), stats++, prev_stats++, delta++,
             max_delta++, accum_stats++) {
                if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
                        *delta = (le32_to_cpu(*stats) -
                                le32_to_cpu(*prev_stats));
                        *accum_stats += *delta;
                        if (*delta > *max_delta)
                                *max_delta = *delta;
                }
        }

        /* reset accumulative statistics for "no-counter" type statistics */
        priv->accum_statistics.general.temperature =
                priv->statistics.general.temperature;
        priv->accum_statistics.general.temperature_m =
                priv->statistics.general.temperature_m;
        priv->accum_statistics.general.ttl_timestamp =
                priv->statistics.general.ttl_timestamp;
        priv->accum_statistics.tx.tx_power.ant_a =
                priv->statistics.tx.tx_power.ant_a;
        priv->accum_statistics.tx.tx_power.ant_b =
                priv->statistics.tx.tx_power.ant_b;
        priv->accum_statistics.tx.tx_power.ant_c =
                priv->statistics.tx.tx_power.ant_c;
}
#endif

#define REG_RECALIB_PERIOD (60)

/**
 * iwl_good_plcp_health - checks for plcp error.
 *
 * When the plcp error rate exceeds the threshold, reset the radio
 * to improve the throughput.
 */
bool iwl_good_plcp_health(struct iwl_priv *priv,
                          struct iwl_rx_packet *pkt)
{
        bool rc = true;
        int combined_plcp_delta;
        unsigned int plcp_msec;
        unsigned long plcp_received_jiffies;

        /*
         * check for plcp_err and trigger radio reset if it exceeds
         * the plcp error threshold plcp_delta.
         */
        plcp_received_jiffies = jiffies;
        plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
                                     (long) priv->plcp_jiffies);
        priv->plcp_jiffies = plcp_received_jiffies;

        /*
         * check to make sure plcp_msec is not 0 to prevent division
         * by zero.
         */
        if (plcp_msec) {
                combined_plcp_delta =
                        (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
                        le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
                        (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
                        le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));

                if ((combined_plcp_delta > 0) &&
                    ((combined_plcp_delta * 100) / plcp_msec) >
                        priv->cfg->plcp_delta_threshold) {
                        /*
                         * if plcp_err exceeds the threshold,
                         * the following data is printed in csv format:
                         *    Text: plcp_err exceeded %d,
                         *    Received ofdm.plcp_err,
                         *    Current ofdm.plcp_err,
                         *    Received ofdm_ht.plcp_err,
                         *    Current ofdm_ht.plcp_err,
                         *    combined_plcp_delta,
                         *    plcp_msec
                         */
                        IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
                                "%u, %u, %u, %u, %d, %u mSecs\n",
                                priv->cfg->plcp_delta_threshold,
                                le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
                                le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
                                le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
                                le32_to_cpu(
                                        priv->statistics.rx.ofdm_ht.plcp_err),
                                combined_plcp_delta, plcp_msec);
                        rc = false;
                }
        }
        return rc;
}
EXPORT_SYMBOL(iwl_good_plcp_health);
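
/*
 * Worked example (illustrative, assumed values): if 60 new PLCP errors
 * (OFDM plus OFDM-HT) were counted over plcp_msec = 3000 ms, the normalized
 * rate is (60 * 100) / 3000 = 2 errors per 100 ms; iwl_good_plcp_health()
 * reports failure only if that rate exceeds priv->cfg->plcp_delta_threshold,
 * which in turn makes iwl_recover_from_statistics() below reset the radio.
 */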

static void iwl_recover_from_statistics(struct iwl_priv *priv,
                                        struct iwl_rx_packet *pkt)
{
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
        if (iwl_is_associated(priv)) {
                if (priv->cfg->ops->lib->check_ack_health) {
                        if (!priv->cfg->ops->lib->check_ack_health(
                            priv, pkt)) {
                                /*
                                 * low ack count detected,
                                 * restart Firmware
                                 */
                                IWL_ERR(priv, "low ack count detected, "
                                        "restart firmware\n");
                                iwl_force_reset(priv, IWL_FW_RESET);
                        }
                } else if (priv->cfg->ops->lib->check_plcp_health) {
                        if (!priv->cfg->ops->lib->check_plcp_health(
                            priv, pkt)) {
                                /*
                                 * high plcp error detected,
                                 * reset Radio
                                 */
                                iwl_force_reset(priv, IWL_RF_RESET);
                        }
                }
        }
}

void iwl_rx_statistics(struct iwl_priv *priv,
                       struct iwl_rx_mem_buffer *rxb)
{
        int change;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);

        IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
                     (int)sizeof(priv->statistics),
                     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);

        change = ((priv->statistics.general.temperature !=
                   pkt->u.stats.general.temperature) ||
                  ((priv->statistics.flag &
                    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
                   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));

#ifdef CONFIG_IWLWIFI_DEBUG
        iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
        iwl_recover_from_statistics(priv, pkt);

        memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));

        set_bit(STATUS_STATISTICS, &priv->status);

        /* Reschedule the statistics timer to occur in
         * REG_RECALIB_PERIOD seconds to ensure we get a
         * thermal update even if the uCode doesn't give
         * us one */
        mod_timer(&priv->statistics_periodic, jiffies +
                  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

        if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
            (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
                iwl_rx_calc_noise(priv);
                queue_work(priv->workqueue, &priv->run_time_calib_work);
        }

        if (priv->cfg->ops->lib->temp_ops.temperature && change)
                priv->cfg->ops->lib->temp_ops.temperature(priv);
}
EXPORT_SYMBOL(iwl_rx_statistics);

void iwl_reply_statistics(struct iwl_priv *priv,
                          struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);

        if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_DEBUG
                memset(&priv->accum_statistics, 0,
                       sizeof(struct iwl_notif_statistics));
                memset(&priv->delta_statistics, 0,
                       sizeof(struct iwl_notif_statistics));
                memset(&priv->max_delta, 0,
                       sizeof(struct iwl_notif_statistics));
#endif
                IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
        }
        iwl_rx_statistics(priv, rxb);
}
EXPORT_SYMBOL(iwl_reply_statistics);

/*
 * returns non-zero if packet should be dropped
 */
int iwl_set_decrypted_flag(struct iwl_priv *priv,
                           struct ieee80211_hdr *hdr,
                           u32 decrypt_res,
                           struct ieee80211_rx_status *stats)
{
        u16 fc = le16_to_cpu(hdr->frame_control);

        if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
                return 0;

        if (!(fc & IEEE80211_FCTL_PROTECTED))
                return 0;

        IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
        switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
        case RX_RES_STATUS_SEC_TYPE_TKIP:
                /* The uCode has got a bad phase 1 Key, pushes the packet.
                 * Decryption will be done in SW. */
                if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
                    RX_RES_STATUS_BAD_KEY_TTAK)
                        break;
                /* fall through */
        case RX_RES_STATUS_SEC_TYPE_WEP:
                if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
                    RX_RES_STATUS_BAD_ICV_MIC) {
                        /* bad ICV, the packet is destroyed since the
                         * decryption is in-place, drop it */
                        IWL_DEBUG_RX(priv, "Packet destroyed\n");
                        return -1;
                }
                /* fall through */
        case RX_RES_STATUS_SEC_TYPE_CCMP:
                if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
                    RX_RES_STATUS_DECRYPT_OK) {
                        IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
                        stats->flag |= RX_FLAG_DECRYPTED;
                }
                break;
        default:
                break;
        }
        return 0;
}
EXPORT_SYMBOL(iwl_set_decrypted_flag);