/* iwl-rx.c */
  1. /******************************************************************************
  2. *
  3. * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
  4. *
  5. * Portions of this file are derived from the ipw3945 project, as well
  6. * as portions of the ieee80211 subsystem header files.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of version 2 of the GNU General Public License as
  10. * published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful, but WITHOUT
  13. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  15. * more details.
  16. *
  17. * You should have received a copy of the GNU General Public License along with
  18. * this program; if not, write to the Free Software Foundation, Inc.,
  19. * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
  20. *
  21. * The full GNU General Public License is included in this distribution in the
  22. * file called LICENSE.
  23. *
  24. * Contact Information:
  25. * Intel Linux Wireless <ilw@linux.intel.com>
  26. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  27. *
  28. *****************************************************************************/
  29. #include <linux/etherdevice.h>
  30. #include <linux/slab.h>
  31. #include <linux/sched.h>
  32. #include <net/mac80211.h>
  33. #include <asm/unaligned.h>
  34. #include "iwl-eeprom.h"
  35. #include "iwl-dev.h"
  36. #include "iwl-core.h"
  37. #include "iwl-sta.h"
  38. #include "iwl-io.h"
  39. #include "iwl-helpers.h"
  40. #include "iwl-agn-calib.h"
  41. #include "iwl-agn.h"
  42. /******************************************************************************
  43. *
  44. * RX path functions
  45. *
  46. ******************************************************************************/
  47. /*
  48. * Rx theory of operation
  49. *
  50. * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
  51. * each of which point to Receive Buffers to be filled by the NIC. These get
  52. * used not only for Rx frames, but for any command response or notification
  53. * from the NIC. The driver and NIC manage the Rx buffers by means
  54. * of indexes into the circular buffer.
  55. *
  56. * Rx Queue Indexes
  57. * The host/firmware share two index registers for managing the Rx buffers.
  58. *
  59. * The READ index maps to the first position that the firmware may be writing
  60. * to -- the driver can read up to (but not including) this position and get
  61. * good data.
  62. * The READ index is managed by the firmware once the card is enabled.
  63. *
  64. * The WRITE index maps to the last position the driver has read from -- the
  65. * position preceding WRITE is the last slot the firmware can place a packet.
  66. *
  67. * The queue is empty (no good data) if WRITE = READ - 1, and is full if
  68. * WRITE = READ.
  69. *
  70. * During initialization, the host sets up the READ queue position to the first
  71. * INDEX position, and WRITE to the last (READ - 1 wrapped)
  72. *
  73. * When the firmware places a packet in a buffer, it will advance the READ index
  74. * and fire the RX interrupt. The driver can then query the READ index and
  75. * process as many packets as possible, moving the WRITE index forward as it
  76. * resets the Rx queue buffers with new memory.
  77. *
  78. * The management in the driver is as follows:
  79. * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
  80. * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
  81. * to replenish the iwl->rxq->rx_free.
  82. * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
  83. * iwl->rxq is replenished and the READ INDEX is updated (updating the
  84. * 'processed' and 'read' driver indexes as well)
  85. * + A received packet is processed and handed to the kernel network stack,
  86. * detached from the iwl->rxq. The driver 'processed' index is updated.
  87. * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
  88. * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
  89. * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
  90. * were enough free buffers and RX_STALLED is set it is cleared.
  91. *
  92. *
  93. * Driver sequence:
  94. *
  95. * iwl_rx_queue_alloc() Allocates rx_free
  96. * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
  97. * iwl_rx_queue_restock
  98. * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
  99. * queue, updates firmware pointers, and updates
  100. * the WRITE index. If insufficient rx_free buffers
  101. * are available, schedules iwl_rx_replenish
  102. *
  103. * -- enable interrupts --
  104. * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
  105. * READ INDEX, detaching the SKB from the pool.
  106. * Moves the packet buffer from queue to rx_used.
  107. * Calls iwl_rx_queue_restock to refill any empty
  108. * slots.
  109. * ...
  110. *
  111. */
  112. /**
  113. * iwl_rx_queue_space - Return number of free slots available in queue.
  114. */
  115. int iwl_rx_queue_space(const struct iwl_rx_queue *q)
  116. {
  117. int s = q->read - q->write;
  118. if (s <= 0)
  119. s += RX_QUEUE_SIZE;
  120. /* keep some buffer to not confuse full and empty queue */
  121. s -= 2;
  122. if (s < 0)
  123. s = 0;
  124. return s;
  125. }
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Publishes q->write to the device's Rx write-pointer register, rounded
 * down to a multiple of 8 as the device requires.  Does nothing unless
 * q->need_update is set.  Takes q->lock internally.
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				/* Device asleep: request a wakeup and bail
				 * WITHOUT clearing need_update, so this
				 * write is retried once it is awake. */
				IWL_DEBUG_INFO(priv,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
  169. int iwl_rx_queue_alloc(struct iwl_priv *priv)
  170. {
  171. struct iwl_rx_queue *rxq = &priv->rxq;
  172. struct device *dev = &priv->pci_dev->dev;
  173. int i;
  174. spin_lock_init(&rxq->lock);
  175. INIT_LIST_HEAD(&rxq->rx_free);
  176. INIT_LIST_HEAD(&rxq->rx_used);
  177. /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
  178. rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
  179. GFP_KERNEL);
  180. if (!rxq->bd)
  181. goto err_bd;
  182. rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
  183. &rxq->rb_stts_dma, GFP_KERNEL);
  184. if (!rxq->rb_stts)
  185. goto err_rb;
  186. /* Fill the rx_used queue with _all_ of the Rx buffers */
  187. for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
  188. list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
  189. /* Set us so that we have processed and used all buffers, but have
  190. * not restocked the Rx queue with fresh buffers */
  191. rxq->read = rxq->write = 0;
  192. rxq->write_actual = 0;
  193. rxq->free_count = 0;
  194. rxq->need_update = 0;
  195. return 0;
  196. err_rb:
  197. dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
  198. rxq->bd_dma);
  199. err_bd:
  200. return -ENOMEM;
  201. }
  202. /******************************************************************************
  203. *
  204. * Generic RX handler implementations
  205. *
  206. ******************************************************************************/
  207. static void iwl_rx_reply_alive(struct iwl_priv *priv,
  208. struct iwl_rx_mem_buffer *rxb)
  209. {
  210. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  211. struct iwl_alive_resp *palive;
  212. struct delayed_work *pwork;
  213. palive = &pkt->u.alive_frame;
  214. IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
  215. "0x%01X 0x%01X\n",
  216. palive->is_valid, palive->ver_type,
  217. palive->ver_subtype);
  218. if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
  219. IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
  220. memcpy(&priv->card_alive_init,
  221. &pkt->u.alive_frame,
  222. sizeof(struct iwl_init_alive_resp));
  223. pwork = &priv->init_alive_start;
  224. } else {
  225. IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
  226. memcpy(&priv->card_alive, &pkt->u.alive_frame,
  227. sizeof(struct iwl_alive_resp));
  228. pwork = &priv->alive_start;
  229. }
  230. /* We delay the ALIVE response by 5ms to
  231. * give the HW RF Kill time to activate... */
  232. if (palive->is_valid == UCODE_VALID_OK)
  233. queue_delayed_work(priv->workqueue, pwork,
  234. msecs_to_jiffies(5));
  235. else {
  236. IWL_WARN(priv, "%s uCode did not respond OK.\n",
  237. (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
  238. "init" : "runtime");
  239. /*
  240. * If fail to load init uCode,
  241. * let's try to load the init uCode again.
  242. * We should not get into this situation, but if it
  243. * does happen, we should not move on and loading "runtime"
  244. * without proper calibrate the device.
  245. */
  246. if (palive->ver_subtype == INITIALIZE_SUBTYPE)
  247. priv->ucode_type = UCODE_NONE;
  248. queue_work(priv->workqueue, &priv->restart);
  249. }
  250. }
/* Log an error reply from the uCode: the command it rejected, its
 * sequence number, and the firmware's error type/info words. */
static void iwl_rx_reply_error(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
/* Handle a Channel Switch Announcement notification from the uCode:
 * on success, commit the new channel to the active/staging RXON. */
static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
	/*
	 * MULTI-FIXME
	 * See iwl_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;

	/* Only meaningful while a driver-initiated switch is pending */
	if (priv->switch_rxon.switch_in_progress) {
		/* status == 0 means the switch succeeded; the channel must
		 * also match the one the driver requested */
		if (!le32_to_cpu(csa->status) &&
		    (csa->channel == priv->switch_rxon.channel)) {
			rxon->channel = csa->channel;
			ctx->staging.channel = csa->channel;
			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
			      le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, true);
		} else {
			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
			      le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, false);
		}
	}
}
  288. static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
  289. struct iwl_rx_mem_buffer *rxb)
  290. {
  291. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  292. struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
  293. if (!report->state) {
  294. IWL_DEBUG_11H(priv,
  295. "Spectrum Measure Notification: Start\n");
  296. return;
  297. }
  298. memcpy(&priv->measure_report, report, sizeof(*report));
  299. priv->measurement_status |= MEASUREMENT_READY;
  300. }
/* Debug-only: log the uCode's power-management sleep notification
 * (mode and wakeup source). Compiles to an empty stub otherwise. */
static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
  311. static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
  312. struct iwl_rx_mem_buffer *rxb)
  313. {
  314. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  315. u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
  316. IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
  317. "notification for %s:\n", len,
  318. get_cmd_string(pkt->hdr.cmd));
  319. iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
  320. }
/* Handle the uCode's beacon-transmit notification: record IBSS manager
 * status and kick the beacon-update work item. */
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
	u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
		"tsf:0x%.8x%.8x rate:%d\n",
		status & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif
	/* Remember whether the uCode considers us the IBSS beacon manager */
	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
		queue_work(priv->workqueue, &priv->beacon_update);
}
  341. /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
  342. #define ACK_CNT_RATIO (50)
  343. #define BA_TIMEOUT_CNT (5)
  344. #define BA_TIMEOUT_MAX (16)
/**
 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
 *
 * When the ACK count ratio is low and aggregated BA timeout retries exceeding
 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
 * operation state.
 *
 * Returns false only when the link looks unhealthy enough to warrant a
 * firmware reload; true means "healthy" or "not enough data to judge".
 */
static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
{
	int actual_delta, expected_delta, ba_timeout_delta;
	struct statistics_tx *cur, *old;

	/* Skip the check while aggregation sessions are active; their
	 * ACK accounting would skew the ratio. */
	if (priv->_agn.agg_tids_count)
		return true;

	/* Select the BT-coex or plain flavour of the statistics block */
	if (iwl_bt_statistics(priv)) {
		cur = &pkt->u.stats_bt.tx;
		old = &priv->_agn.statistics_bt.tx;
	} else {
		cur = &pkt->u.stats.tx;
		old = &priv->_agn.statistics.tx;
	}

	/* Counter deltas since the previous statistics notification */
	actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
		       le32_to_cpu(old->actual_ack_cnt);
	expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
			 le32_to_cpu(old->expected_ack_cnt);

	/* Values should not be negative, but we do not trust the firmware */
	if (actual_delta <= 0 || expected_delta <= 0)
		return true;

	ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
			   le32_to_cpu(old->agg.ba_timeout);

	/* Unhealthy candidate: ACK ratio below ACK_CNT_RATIO percent AND
	 * more than BA_TIMEOUT_CNT new BA timeouts in this interval */
	if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
	    ba_timeout_delta > BA_TIMEOUT_CNT) {
		IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
				actual_delta, expected_delta, ba_timeout_delta);

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/*
		 * This is ifdef'ed on DEBUGFS because otherwise the
		 * statistics aren't available. If DEBUGFS is set but
		 * DEBUG is not, these will just compile out.
		 */
		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
				priv->_agn.delta_statistics.tx.rx_detected_cnt);
		IWL_DEBUG_RADIO(priv,
				"ack_or_ba_timeout_collision delta %d\n",
				priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision);
#endif
		/* Only declare failure once BA timeouts are clearly excessive */
		if (ba_timeout_delta >= BA_TIMEOUT_MAX)
			return false;
	}
	return true;
}
/**
 * iwl_good_plcp_health - checks for plcp error.
 *
 * When the plcp error is exceeding the thresholds, reset the radio
 * to improve the throughput.
 *
 * @msecs: elapsed time since the previous statistics notification,
 *         used to normalize the error delta into a rate.
 *
 * Returns false when the PLCP error rate exceeds the configured
 * threshold; true means healthy, check disabled, or no usable data.
 */
static bool iwl_good_plcp_health(struct iwl_priv *priv,
				 struct iwl_rx_packet *pkt, unsigned int msecs)
{
	int delta;
	int threshold = priv->cfg->base_params->plcp_delta_threshold;

	if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
		return true;
	}

	/* Sum the OFDM and OFDM-HT PLCP error increases since the last
	 * notification; BT and non-BT statistics layouts differ. */
	if (iwl_bt_statistics(priv)) {
		struct statistics_rx_bt *cur, *old;

		cur = &pkt->u.stats_bt.rx;
		old = &priv->_agn.statistics_bt.rx;

		delta = le32_to_cpu(cur->ofdm.plcp_err) -
			le32_to_cpu(old->ofdm.plcp_err) +
			le32_to_cpu(cur->ofdm_ht.plcp_err) -
			le32_to_cpu(old->ofdm_ht.plcp_err);
	} else {
		struct statistics_rx *cur, *old;

		cur = &pkt->u.stats.rx;
		old = &priv->_agn.statistics.rx;

		delta = le32_to_cpu(cur->ofdm.plcp_err) -
			le32_to_cpu(old->ofdm.plcp_err) +
			le32_to_cpu(cur->ofdm_ht.plcp_err) -
			le32_to_cpu(old->ofdm_ht.plcp_err);
	}

	/* Can be negative if firmware reseted statistics */
	if (delta <= 0)
		return true;

	/* Compare the per-interval error rate against the threshold */
	if ((delta * 100 / msecs) > threshold) {
		IWL_DEBUG_RADIO(priv,
				"plcp health threshold %u delta %d msecs %u\n",
				threshold, delta, msecs);
		return false;
	}

	return true;
}
/*
 * iwl_recover_from_statistics - run link-health checks and snapshot stats.
 *
 * Runs the ACK and PLCP health checks against the fresh statistics
 * notification, forcing a firmware or RF reset if they fail, then stores
 * the notification as the baseline for the next round of delta checks.
 */
static void iwl_recover_from_statistics(struct iwl_priv *priv,
					struct iwl_rx_packet *pkt)
{
	const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
	unsigned int msecs;
	unsigned long stamp;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	stamp = jiffies;
	msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);

	/* Only gather statistics and update time stamp when not associated */
	if (!iwl_is_any_associated(priv))
		goto out;

	/* Do not check/recover when do not have enough statistics data */
	if (msecs < 99)
		return;

	if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) {
		IWL_ERR(priv, "low ack count detected, restart firmware\n");
		/* NOTE(review): bails when iwl_force_reset() returns 0 --
		 * presumably "reset issued", so the stale statistics are
		 * not taken as the new baseline; confirm return semantics. */
		if (!iwl_force_reset(priv, IWL_FW_RESET, false))
			return;
	}

	if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt, msecs))
		iwl_force_reset(priv, IWL_RF_RESET, false);

 out:
	/* Snapshot this notification as the new comparison baseline */
	if (iwl_bt_statistics(priv))
		memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
			sizeof(priv->_agn.statistics_bt));
	else
		memcpy(&priv->_agn.statistics, &pkt->u.stats,
			sizeof(priv->_agn.statistics));

	priv->rx_statistics_jiffies = stamp;
}
  470. /* Calculate noise level, based on measurements during network silence just
  471. * before arriving beacon. This measurement can be done only if we know
  472. * exactly when to expect beacons, therefore only when we're associated. */
  473. static void iwl_rx_calc_noise(struct iwl_priv *priv)
  474. {
  475. struct statistics_rx_non_phy *rx_info;
  476. int num_active_rx = 0;
  477. int total_silence = 0;
  478. int bcn_silence_a, bcn_silence_b, bcn_silence_c;
  479. int last_rx_noise;
  480. if (iwl_bt_statistics(priv))
  481. rx_info = &(priv->_agn.statistics_bt.rx.general.common);
  482. else
  483. rx_info = &(priv->_agn.statistics.rx.general);
  484. bcn_silence_a =
  485. le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
  486. bcn_silence_b =
  487. le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
  488. bcn_silence_c =
  489. le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
  490. if (bcn_silence_a) {
  491. total_silence += bcn_silence_a;
  492. num_active_rx++;
  493. }
  494. if (bcn_silence_b) {
  495. total_silence += bcn_silence_b;
  496. num_active_rx++;
  497. }
  498. if (bcn_silence_c) {
  499. total_silence += bcn_silence_c;
  500. num_active_rx++;
  501. }
  502. /* Average among active antennas */
  503. if (num_active_rx)
  504. last_rx_noise = (total_silence / num_active_rx) - 107;
  505. else
  506. last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
  507. IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
  508. bcn_silence_a, bcn_silence_b, bcn_silence_c,
  509. last_rx_noise);
  510. }
/*
 * based on the assumption of all statistics counter are in DWORD
 * FIXME: This function is for debugging, do not deal with
 * the case of counters roll-over.
 *
 * Walks the statistics notification as a flat array of 32-bit counters,
 * accumulating per-counter increases into the debugfs accum/delta/max
 * bookkeeping buffers. Compiles to a no-op without CONFIG_IWLWIFI_DEBUGFS.
 */
static void iwl_accumulative_statistics(struct iwl_priv *priv,
					__le32 *stats)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct statistics_general_common *general, *accum_general;
	struct statistics_tx *tx, *accum_tx;

	/* Select the BT-coex or plain bookkeeping set; all five buffers
	 * must be the same layout as *stats for the word-walk below. */
	if (iwl_bt_statistics(priv)) {
		prev_stats = (__le32 *)&priv->_agn.statistics_bt;
		accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
		size = sizeof(struct iwl_bt_notif_statistics);
		general = &priv->_agn.statistics_bt.general.common;
		accum_general = &priv->_agn.accum_statistics_bt.general.common;
		tx = &priv->_agn.statistics_bt.tx;
		accum_tx = &priv->_agn.accum_statistics_bt.tx;
		delta = (u32 *)&priv->_agn.delta_statistics_bt;
		max_delta = (u32 *)&priv->_agn.max_delta_bt;
	} else {
		prev_stats = (__le32 *)&priv->_agn.statistics;
		accum_stats = (u32 *)&priv->_agn.accum_statistics;
		size = sizeof(struct iwl_notif_statistics);
		general = &priv->_agn.statistics.general.common;
		accum_general = &priv->_agn.accum_statistics.general.common;
		tx = &priv->_agn.statistics.tx;
		accum_tx = &priv->_agn.accum_statistics.tx;
		delta = (u32 *)&priv->_agn.delta_statistics;
		max_delta = (u32 *)&priv->_agn.max_delta;
	}

	/* Start one DWORD in (skipping the leading flag word) and fold
	 * each counter's increase into delta/accum/max. Decreases are
	 * ignored (counters are assumed monotonic between resets). */
	for (i = sizeof(__le32); i < size;
	     i += sizeof(__le32), stats++, prev_stats++, delta++,
	     max_delta++, accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta = (le32_to_cpu(*stats) -
				le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative statistics for "no-counter" type statistics */
	accum_general->temperature = general->temperature;
	accum_general->temperature_m = general->temperature_m;
	accum_general->ttl_timestamp = general->ttl_timestamp;
	accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
	accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
	accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
#endif
}
/* Handle a periodic statistics notification: accumulate debug counters,
 * run link-health recovery checks, restart the statistics timer, and
 * trigger noise calculation / temperature handling as needed. */
static void iwl_rx_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	/* Period (seconds) for the fallback statistics timer below */
	const int reg_recalib_period = 60;
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (iwl_bt_statistics(priv)) {
		IWL_DEBUG_RX(priv,
			     "Statistics notification received (%d vs %d).\n",
			     (int)sizeof(struct iwl_bt_notif_statistics),
			     le32_to_cpu(pkt->len_n_flags) &
			     FH_RSCSR_FRAME_SIZE_MSK);

		/* A temperature or HT40-mode change since the last copy
		 * warrants calling the temperature handler below. */
		change = ((priv->_agn.statistics_bt.general.common.temperature !=
			   pkt->u.stats_bt.general.common.temperature) ||
			  ((priv->_agn.statistics_bt.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
			   (pkt->u.stats_bt.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK)));

		iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
	} else {
		IWL_DEBUG_RX(priv,
			     "Statistics notification received (%d vs %d).\n",
			     (int)sizeof(struct iwl_notif_statistics),
			     le32_to_cpu(pkt->len_n_flags) &
			     FH_RSCSR_FRAME_SIZE_MSK);

		change = ((priv->_agn.statistics.general.common.temperature !=
			   pkt->u.stats.general.common.temperature) ||
			  ((priv->_agn.statistics.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
			   (pkt->u.stats.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK)));

		iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
	}

	/* Health checks; also snapshots pkt as the new stats baseline */
	iwl_recover_from_statistics(priv, pkt);

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * reg_recalib_period seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(reg_recalib_period * 1000));

	/* Noise calc is only valid from the unsolicited periodic
	 * notification, and not while scanning */
	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}

	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
}
/* Handle a solicited statistics reply: if the uCode indicates it has
 * cleared its counters, wipe our debugfs accumulators to match, then
 * process the payload like a normal statistics notification. */
static void iwl_rx_reply_statistics(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
		memset(&priv->_agn.accum_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->_agn.delta_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->_agn.max_delta, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->_agn.accum_statistics_bt, 0,
			sizeof(struct iwl_bt_notif_statistics));
		memset(&priv->_agn.delta_statistics_bt, 0,
			sizeof(struct iwl_bt_notif_statistics));
		memset(&priv->_agn.max_delta_bt, 0,
			sizeof(struct iwl_bt_notif_statistics));
#endif
		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
	}
	/* Shared processing with the periodic notification path */
	iwl_rx_statistics(priv, rxb);
}
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot the status bits before modifying them, so an HW
	 * rfkill transition can be detected at the bottom. */
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {
		/* Block host commands while the card is disabled */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv, HBUS_TARG_MBX_C,
				   HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					   HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
		/* Critical temperature reached: enter CT-kill state */
		if (flags & CT_CARD_DISABLED)
			iwl_tt_enter_ct_kill(priv);
	}
	if (!(flags & CT_CARD_DISABLED))
		iwl_tt_exit_ct_kill(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	/* If the HW rfkill state changed, report it to mac80211; else
	 * wake any waiters, since the command path state is settled. */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up_interruptible(&priv->wait_command_queue);
}
  682. static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
  683. struct iwl_rx_mem_buffer *rxb)
  684. {
  685. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  686. struct iwl_missed_beacon_notif *missed_beacon;
  687. missed_beacon = &pkt->u.missed_beacon;
  688. if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
  689. priv->missed_beacon_threshold) {
  690. IWL_DEBUG_CALIB(priv,
  691. "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
  692. le32_to_cpu(missed_beacon->consecutive_missed_beacons),
  693. le32_to_cpu(missed_beacon->total_missed_becons),
  694. le32_to_cpu(missed_beacon->num_recvd_beacons),
  695. le32_to_cpu(missed_beacon->num_expected_beacons));
  696. if (!test_bit(STATUS_SCANNING, &priv->status))
  697. iwl_init_sensitivity(priv);
  698. }
  699. }
  700. /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
  701. * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
  702. static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
  703. struct iwl_rx_mem_buffer *rxb)
  704. {
  705. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  706. priv->_agn.last_phy_res_valid = true;
  707. memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
  708. sizeof(struct iwl_rx_phy_res));
  709. }
/*
 * returns non-zero if packet should be dropped
 *
 * Examines the uCode decryption status of a protected frame and either:
 *  - marks the frame RX_FLAG_DECRYPTED so mac80211 skips SW decryption,
 *  - leaves it unmarked so mac80211 decrypts in software, or
 *  - returns -1 to drop it (in-place decryption already corrupted it).
 *
 * Returns 0 to keep the frame (possibly decrypted), -1 to drop it.
 *
 * NOTE: the switch below relies on intentional case fallthrough:
 * TKIP falls into WEP (shared bad-ICV check), WEP falls into CCMP
 * (shared decrypt-OK check).
 */
static int iwl_set_decrypted_flag(struct iwl_priv *priv,
				  struct ieee80211_hdr *hdr,
				  u32 decrypt_res,
				  struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
	    RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* Unprotected frames need no decryption handling. */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through - TTAK OK, check ICV/MIC like WEP */

	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through - ICV OK, check for successful decrypt */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
  756. static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
  757. struct ieee80211_hdr *hdr,
  758. u16 len,
  759. u32 ampdu_status,
  760. struct iwl_rx_mem_buffer *rxb,
  761. struct ieee80211_rx_status *stats)
  762. {
  763. struct sk_buff *skb;
  764. __le16 fc = hdr->frame_control;
  765. /* We only process data packets if the interface is open */
  766. if (unlikely(!priv->is_open)) {
  767. IWL_DEBUG_DROP_LIMIT(priv,
  768. "Dropping packet while interface is not open.\n");
  769. return;
  770. }
  771. /* In case of HW accelerated crypto and bad decryption, drop */
  772. if (!priv->cfg->mod_params->sw_crypto &&
  773. iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
  774. return;
  775. skb = dev_alloc_skb(128);
  776. if (!skb) {
  777. IWL_ERR(priv, "dev_alloc_skb failed\n");
  778. return;
  779. }
  780. skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
  781. iwl_update_stats(priv, false, fc, len);
  782. memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
  783. ieee80211_rx(priv->hw, skb);
  784. priv->alloc_rxb_page--;
  785. rxb->page = NULL;
  786. }
/*
 * Translate the MPDU-format decryption status word (RX_MPDU_RES_STATUS_*)
 * into the legacy RX_RES_STATUS_* format that iwl_set_decrypted_flag()
 * understands.
 *
 * The security-type bits are carried through unchanged; per-algorithm
 * MIC/TTAK/ICV result bits are mapped onto the legacy decrypt-type field.
 * Returns the translated status word.
 */
static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	/* Security type maps through unchanged. */
	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		/* WEP (and TKIP with good TTAK): check the ICV bit. */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
		     decrypt_in, decrypt_out);

	return decrypt_out;
}
  834. /* Called for REPLY_RX (legacy ABG frames), or
  835. * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
  836. static void iwl_rx_reply_rx(struct iwl_priv *priv,
  837. struct iwl_rx_mem_buffer *rxb)
  838. {
  839. struct ieee80211_hdr *header;
  840. struct ieee80211_rx_status rx_status;
  841. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  842. struct iwl_rx_phy_res *phy_res;
  843. __le32 rx_pkt_status;
  844. struct iwl_rx_mpdu_res_start *amsdu;
  845. u32 len;
  846. u32 ampdu_status;
  847. u32 rate_n_flags;
  848. /**
  849. * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
  850. * REPLY_RX: physical layer info is in this buffer
  851. * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
  852. * command and cached in priv->last_phy_res
  853. *
  854. * Here we set up local variables depending on which command is
  855. * received.
  856. */
  857. if (pkt->hdr.cmd == REPLY_RX) {
  858. phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
  859. header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
  860. + phy_res->cfg_phy_cnt);
  861. len = le16_to_cpu(phy_res->byte_count);
  862. rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
  863. phy_res->cfg_phy_cnt + len);
  864. ampdu_status = le32_to_cpu(rx_pkt_status);
  865. } else {
  866. if (!priv->_agn.last_phy_res_valid) {
  867. IWL_ERR(priv, "MPDU frame without cached PHY data\n");
  868. return;
  869. }
  870. phy_res = &priv->_agn.last_phy_res;
  871. amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
  872. header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
  873. len = le16_to_cpu(amsdu->byte_count);
  874. rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
  875. ampdu_status = iwl_translate_rx_status(priv,
  876. le32_to_cpu(rx_pkt_status));
  877. }
  878. if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
  879. IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
  880. phy_res->cfg_phy_cnt);
  881. return;
  882. }
  883. if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
  884. !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
  885. IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
  886. le32_to_cpu(rx_pkt_status));
  887. return;
  888. }
  889. /* This will be used in several places later */
  890. rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
  891. /* rx_status carries information about the packet to mac80211 */
  892. rx_status.mactime = le64_to_cpu(phy_res->timestamp);
  893. rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
  894. IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
  895. rx_status.freq =
  896. ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
  897. rx_status.band);
  898. rx_status.rate_idx =
  899. iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
  900. rx_status.flag = 0;
  901. /* TSF isn't reliable. In order to allow smooth user experience,
  902. * this W/A doesn't propagate it to the mac80211 */
  903. /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
  904. priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
  905. /* Find max signal strength (dBm) among 3 antenna/receiver chains */
  906. rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
  907. iwl_dbg_log_rx_data_frame(priv, len, header);
  908. IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
  909. rx_status.signal, (unsigned long long)rx_status.mactime);
  910. /*
  911. * "antenna number"
  912. *
  913. * It seems that the antenna field in the phy flags value
  914. * is actually a bit field. This is undefined by radiotap,
  915. * it wants an actual antenna number but I always get "7"
  916. * for most legacy frames I receive indicating that the
  917. * same frame was received on all three RX chains.
  918. *
  919. * I think this field should be removed in favor of a
  920. * new 802.11n radiotap field "RX chains" that is defined
  921. * as a bitmask.
  922. */
  923. rx_status.antenna =
  924. (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
  925. >> RX_RES_PHY_FLAGS_ANTENNA_POS;
  926. /* set the preamble flag if appropriate */
  927. if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
  928. rx_status.flag |= RX_FLAG_SHORTPRE;
  929. /* Set up the HT phy flags */
  930. if (rate_n_flags & RATE_MCS_HT_MSK)
  931. rx_status.flag |= RX_FLAG_HT;
  932. if (rate_n_flags & RATE_MCS_HT40_MSK)
  933. rx_status.flag |= RX_FLAG_40MHZ;
  934. if (rate_n_flags & RATE_MCS_SGI_MSK)
  935. rx_status.flag |= RX_FLAG_SHORT_GI;
  936. iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
  937. rxb, &rx_status);
  938. }
  939. /**
  940. * iwl_setup_rx_handlers - Initialize Rx handler callbacks
  941. *
  942. * Setup the RX handlers for each of the reply types sent from the uCode
  943. * to the host.
  944. */
  945. void iwl_setup_rx_handlers(struct iwl_priv *priv)
  946. {
  947. void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
  948. handlers = priv->rx_handlers;
  949. handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
  950. handlers[REPLY_ERROR] = iwl_rx_reply_error;
  951. handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
  952. handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
  953. handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
  954. handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
  955. handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
  956. /*
  957. * The same handler is used for both the REPLY to a discrete
  958. * statistics request from the host as well as for the periodic
  959. * statistics notifications (after received beacons) from the uCode.
  960. */
  961. handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
  962. handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
  963. iwl_setup_rx_scan_handlers(priv);
  964. handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
  965. handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
  966. /* Rx handlers */
  967. handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
  968. handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
  969. /* block ack */
  970. handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
  971. /* Set up hardware specific Rx handlers */
  972. priv->cfg->ops->lib->rx_handler_setup(priv);
  973. }