iwl-rx.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * The driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt.  The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled), if 'processed' != 'read', then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list.  If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
 *   were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()     Allocates rx_free
 * iwl_rx_replenish()       Replenishes rx_free list from rx_used, and calls
 *                          iwl_rx_queue_restock
 * iwl_rx_queue_restock()   Moves available buffers from rx_free into Rx
 *                          queue, updates firmware pointers, and updates
 *                          the WRITE index.  If insufficient rx_free buffers
 *                          are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()           Detach iwl_rx_mem_buffers from pool up to the
 *                          READ INDEX, detaching the SKB from the pool.
 *                          Moves the packet buffer from queue to rx_used.
 *                          Calls iwl_rx_queue_restock to refill any empty
 *                          slots.
 * ...
 *
 */
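
/*
 * Worked example of the index arithmetic above (a sketch only; it
 * assumes RX_QUEUE_SIZE == 256, the value this driver family defines):
 *
 *   read == 10, write == 8:   s = 10 - 8 = 2; after the 2-slot guard
 *	band kept below, iwl_rx_queue_space() returns 0 -- every usable
 *	slot is already restocked and the driver must wait for firmware.
 *   read == 10, write == 200: s = -190, wrapped to -190 + 256 = 66,
 *	minus the guard band = 64 slots free for restocking.
 *
 * The guard band keeps WRITE from ever catching up to READ, so the
 * "full if WRITE = READ, empty if WRITE = READ - 1" convention above
 * stays unambiguous.
 */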
/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
	int s = q->read - q->write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	/* keep some buffer to not confuse full and empty queue */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(priv,
					       "Rx queue requesting wakeup,"
					       " GP1 = 0x%x\n", reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					   q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					   q->write_actual);
		}
	}
	q->need_update = 0;

exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
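
/*
 * Illustration of the multiple-of-8 masking above: the device only
 * accepts write pointers aligned to 8 RBDs, so q->write == 13 yields
 * q->write_actual == (13 & ~0x7) == 8; the remaining 5 buffers are
 * only handed to the hardware once q->write reaches 16.
 */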
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct device *dev = &priv->pci_dev->dev;
	int i;

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
				     GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

	/* Set us up so that we have processed and used all buffers, but
	 * have not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	rxq->need_update = 0;
	return 0;

err_rb:
	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
err_bd:
	return -ENOMEM;
}
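
/*
 * Sizing note for the allocation above (as used elsewhere in this
 * driver family): each RBD is a single __le32 holding its Rx buffer's
 * DMA address right-shifted by 8 bits (see iwl_dma_addr2rbd_ptr() in
 * iwl-helpers.h), hence 4 * RX_QUEUE_SIZE bytes for the descriptor
 * ring -- a 1 KiB coherent buffer when RX_QUEUE_SIZE is 256.
 */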
/******************************************************************************
 *
 * Generic RX handler implementations
 *
 ******************************************************************************/

static void iwl_rx_reply_alive(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	priv->device_pointers.log_event_table =
		le32_to_cpu(palive->log_event_table_ptr);
	priv->device_pointers.error_event_table =
		le32_to_cpu(palive->error_event_table_ptr);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
		pwork = &priv->init_alive_start;
	} else {
		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
		pwork = &priv->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(priv->workqueue, pwork,
				   msecs_to_jiffies(5));
	else {
		IWL_WARN(priv, "%s uCode did not respond OK.\n",
			 (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
			 "init" : "runtime");
		/*
		 * If we fail to load the init uCode, try to load it again.
		 * We should not get into this situation, but if it does
		 * happen, we must not move on to loading the "runtime"
		 * uCode without having properly calibrated the device.
		 */
		if (palive->ver_subtype == INITIALIZE_SUBTYPE)
			priv->ucode_type = UCODE_NONE;
		queue_work(priv->workqueue, &priv->restart);
	}
}
static void iwl_rx_reply_error(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
	/*
	 * MULTI-FIXME
	 * See iwl_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;

	if (priv->switch_rxon.switch_in_progress) {
		if (!le32_to_cpu(csa->status) &&
		    (csa->channel == priv->switch_rxon.channel)) {
			rxon->channel = csa->channel;
			ctx->staging.channel = csa->channel;
			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
				      le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, true);
		} else {
			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
				le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, false);
		}
	}
}
static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
					  struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);

	if (!report->state) {
		IWL_DEBUG_11H(priv,
			      "Spectrum Measure Notification: Start\n");
		return;
	}

	memcpy(&priv->measure_report, report, sizeof(*report));
	priv->measurement_status |= MEASUREMENT_READY;
}
static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);

	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
					     struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
			"notification for %s:\n", len,
			get_cmd_string(pkt->hdr.cmd));
	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
}
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
	u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
		     "tsf:0x%.8x%.8x rate:%d\n",
		     status & TX_STATUS_MSK,
		     beacon->beacon_notify_hdr.failure_frame,
		     le32_to_cpu(beacon->ibss_mgr_status),
		     le32_to_cpu(beacon->high_tsf),
		     le32_to_cpu(beacon->low_tsf), rate);
#endif
	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
		queue_work(priv->workqueue, &priv->beacon_update);
}
/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
#define ACK_CNT_RATIO (50)
#define BA_TIMEOUT_CNT (5)
#define BA_TIMEOUT_MAX (16)

/**
 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
 *
 * When the ACK count ratio is low and aggregated BA timeout retries exceed
 * BA_TIMEOUT_MAX, reload the firmware to bring the system back to its
 * normal operational state.
 */
static bool iwl_good_ack_health(struct iwl_priv *priv,
				struct statistics_tx *cur)
{
	int actual_delta, expected_delta, ba_timeout_delta;
	struct statistics_tx *old;

	if (priv->_agn.agg_tids_count)
		return true;

	old = &priv->statistics.tx;

	actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
		       le32_to_cpu(old->actual_ack_cnt);
	expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
			 le32_to_cpu(old->expected_ack_cnt);

	/* Values should not be negative, but we do not trust the firmware */
	if (actual_delta <= 0 || expected_delta <= 0)
		return true;

	ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
			   le32_to_cpu(old->agg.ba_timeout);

	if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
	    ba_timeout_delta > BA_TIMEOUT_CNT) {
		IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
				actual_delta, expected_delta, ba_timeout_delta);

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/*
		 * This is ifdef'ed on DEBUGFS because otherwise the
		 * statistics aren't available. If DEBUGFS is set but
		 * DEBUG is not, these will just compile out.
		 */
		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
				priv->delta_stats.tx.rx_detected_cnt);
		IWL_DEBUG_RADIO(priv,
				"ack_or_ba_timeout_collision delta %d\n",
				priv->delta_stats.tx.ack_or_ba_timeout_collision);
#endif

		if (ba_timeout_delta >= BA_TIMEOUT_MAX)
			return false;
	}

	return true;
}
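
/*
 * Worked example of the thresholds above: actual_delta == 40 ACKs
 * against expected_delta == 100 is a 40% ratio, below ACK_CNT_RATIO
 * (50%), so the check engages once ba_timeout_delta exceeds
 * BA_TIMEOUT_CNT (5); but only a ba_timeout_delta of BA_TIMEOUT_MAX
 * (16) or more makes this return false, which the caller answers
 * with a firmware reload.
 */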
/**
 * iwl_good_plcp_health - checks for PLCP errors.
 *
 * When the PLCP error rate exceeds the threshold, reset the radio
 * to improve throughput.
 */
static bool iwl_good_plcp_health(struct iwl_priv *priv,
				 struct statistics_rx_phy *cur_ofdm,
				 struct statistics_rx_ht_phy *cur_ofdm_ht,
				 unsigned int msecs)
{
	int delta;
	int threshold = priv->cfg->base_params->plcp_delta_threshold;

	if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
		return true;
	}

	delta = le32_to_cpu(cur_ofdm->plcp_err) -
		le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) +
		le32_to_cpu(cur_ofdm_ht->plcp_err) -
		le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err);

	/* Can be negative if the firmware reset the statistics */
	if (delta <= 0)
		return true;

	if ((delta * 100 / msecs) > threshold) {
		IWL_DEBUG_RADIO(priv,
				"plcp health threshold %u delta %d msecs %u\n",
				threshold, delta, msecs);
		return false;
	}

	return true;
}
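
/*
 * Worked example: delta == 300 combined OFDM + OFDM-HT PLCP errors
 * over msecs == 120 normalizes to 300 * 100 / 120 == 250 errors per
 * 100 ms, so any plcp_delta_threshold below 250 fails the check and
 * leads to an RF reset in the caller.
 */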
static void iwl_recover_from_statistics(struct iwl_priv *priv,
					struct statistics_rx_phy *cur_ofdm,
					struct statistics_rx_ht_phy *cur_ofdm_ht,
					struct statistics_tx *tx,
					unsigned long stamp)
{
	const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
	unsigned int msecs;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);

	/* When not associated, only gather statistics and update the time
	 * stamp; skip the health checks and recovery */
	if (!iwl_is_any_associated(priv))
		return;

	/* Do not check/recover when we do not have enough statistics data */
	if (msecs < 99)
		return;

	if (mod_params->ack_check && !iwl_good_ack_health(priv, tx)) {
		IWL_ERR(priv, "low ack count detected, restart firmware\n");
		if (!iwl_force_reset(priv, IWL_FW_RESET, false))
			return;
	}

	if (mod_params->plcp_check &&
	    !iwl_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
		iwl_force_reset(priv, IWL_RF_RESET, false);
}
/* Calculate noise level, based on measurements during network silence just
 * before the arriving beacon.  This measurement can be done only if we know
 * exactly when to expect beacons, therefore only when we're associated. */
static void iwl_rx_calc_noise(struct iwl_priv *priv)
{
	struct statistics_rx_non_phy *rx_info;
	int num_active_rx = 0;
	int total_silence = 0;
	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
	int last_rx_noise;

	rx_info = &priv->statistics.rx_non_phy;

	bcn_silence_a =
		le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
	bcn_silence_b =
		le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
	bcn_silence_c =
		le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;

	if (bcn_silence_a) {
		total_silence += bcn_silence_a;
		num_active_rx++;
	}
	if (bcn_silence_b) {
		total_silence += bcn_silence_b;
		num_active_rx++;
	}
	if (bcn_silence_c) {
		total_silence += bcn_silence_c;
		num_active_rx++;
	}

	/* Average among active antennas */
	if (num_active_rx)
		last_rx_noise = (total_silence / num_active_rx) - 107;
	else
		last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;

	IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
			bcn_silence_a, bcn_silence_b, bcn_silence_c,
			last_rx_noise);
}
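
/*
 * Example of the estimate above: silence RSSI readings of 97 on chain
 * A, 101 on chain B and 0 (inactive) on chain C average to 99 over the
 * two active chains, giving (99 - 107) == -8 dBm of in-band "noise"
 * in the calibration debug output.
 */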
#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
 * Based on the assumption that all statistics counters are DWORDs.
 * FIXME: This function is for debugging only and does not handle
 * counter roll-over.
 */
static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
			__le32 *max_delta, __le32 *accum, int size)
{
	int i;

	for (i = 0;
	     i < size / sizeof(__le32);
	     i++, prev++, cur++, delta++, max_delta++, accum++) {
		if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
			*delta = cpu_to_le32(
				le32_to_cpu(*cur) - le32_to_cpu(*prev));
			le32_add_cpu(accum, le32_to_cpu(*delta));
			if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta))
				*max_delta = *delta;
		}
	}
}

static void
iwl_accumulative_statistics(struct iwl_priv *priv,
			    struct statistics_general_common *common,
			    struct statistics_rx_non_phy *rx_non_phy,
			    struct statistics_rx_phy *rx_ofdm,
			    struct statistics_rx_ht_phy *rx_ofdm_ht,
			    struct statistics_rx_phy *rx_cck,
			    struct statistics_tx *tx,
			    struct statistics_bt_activity *bt_activity)
{
#define ACCUM(_name)	\
	accum_stats((__le32 *)&priv->statistics._name,		\
		    (__le32 *)_name,				\
		    (__le32 *)&priv->delta_stats._name,		\
		    (__le32 *)&priv->max_delta_stats._name,	\
		    (__le32 *)&priv->accum_stats._name,		\
		    sizeof(*_name));

	ACCUM(common);
	ACCUM(rx_non_phy);
	ACCUM(rx_ofdm);
	ACCUM(rx_ofdm_ht);
	ACCUM(rx_cck);
	ACCUM(tx);
	if (bt_activity)
		ACCUM(bt_activity);
#undef ACCUM
}
#else
static inline void
iwl_accumulative_statistics(struct iwl_priv *priv,
			    struct statistics_general_common *common,
			    struct statistics_rx_non_phy *rx_non_phy,
			    struct statistics_rx_phy *rx_ofdm,
			    struct statistics_rx_ht_phy *rx_ofdm_ht,
			    struct statistics_rx_phy *rx_cck,
			    struct statistics_tx *tx,
			    struct statistics_bt_activity *bt_activity)
{
}
#endif
static void iwl_rx_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	unsigned long stamp = jiffies;
	const int reg_recalib_period = 60;
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	__le32 *flag;
	struct statistics_general_common *common;
	struct statistics_rx_non_phy *rx_non_phy;
	struct statistics_rx_phy *rx_ofdm;
	struct statistics_rx_ht_phy *rx_ofdm_ht;
	struct statistics_rx_phy *rx_cck;
	struct statistics_tx *tx;
	struct statistics_bt_activity *bt_activity;

	len -= sizeof(struct iwl_cmd_header); /* skip header */

	IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
		     len);

	if (len == sizeof(struct iwl_bt_notif_statistics)) {
		struct iwl_bt_notif_statistics *stats;
		stats = &pkt->u.stats_bt;
		flag = &stats->flag;
		common = &stats->general.common;
		rx_non_phy = &stats->rx.general.common;
		rx_ofdm = &stats->rx.ofdm;
		rx_ofdm_ht = &stats->rx.ofdm_ht;
		rx_cck = &stats->rx.cck;
		tx = &stats->tx;
		bt_activity = &stats->general.activity;

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/* handle this exception directly */
		priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills;
		le32_add_cpu(&priv->statistics.accum_num_bt_kills,
			     le32_to_cpu(stats->rx.general.num_bt_kills));
#endif
	} else if (len == sizeof(struct iwl_notif_statistics)) {
		struct iwl_notif_statistics *stats;
		stats = &pkt->u.stats;
		flag = &stats->flag;
		common = &stats->general.common;
		rx_non_phy = &stats->rx.general;
		rx_ofdm = &stats->rx.ofdm;
		rx_ofdm_ht = &stats->rx.ofdm_ht;
		rx_cck = &stats->rx.cck;
		tx = &stats->tx;
		bt_activity = NULL;
	} else {
		WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
			  len, sizeof(struct iwl_bt_notif_statistics),
			  sizeof(struct iwl_notif_statistics));
		return;
	}

	change = common->temperature != priv->statistics.common.temperature ||
		 (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
		 (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);

	iwl_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
				    rx_ofdm_ht, rx_cck, tx, bt_activity);

	iwl_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);

	priv->statistics.flag = *flag;
	memcpy(&priv->statistics.common, common, sizeof(*common));
	memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy));
	memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm));
	memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht));
	memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck));
	memcpy(&priv->statistics.tx, tx, sizeof(*tx));
#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (bt_activity)
		memcpy(&priv->statistics.bt_activity, bt_activity,
		       sizeof(*bt_activity));
#endif

	priv->rx_statistics_jiffies = stamp;

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * reg_recalib_period seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(reg_recalib_period * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
}
static void iwl_rx_reply_statistics(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
		memset(&priv->accum_stats, 0,
		       sizeof(priv->accum_stats));
		memset(&priv->delta_stats, 0,
		       sizeof(priv->delta_stats));
		memset(&priv->max_delta_stats, 0,
		       sizeof(priv->max_delta_stats));
#endif
		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
	}
	iwl_rx_statistics(priv, rxb);
}
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv, HBUS_TARG_MBX_C,
				   HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					   HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
		if (flags & CT_CARD_DISABLED)
			iwl_tt_enter_ct_kill(priv);
	}
	if (!(flags & CT_CARD_DISABLED))
		iwl_tt_exit_ct_kill(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
					  test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up_interruptible(&priv->wait_command_queue);
}
static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
				       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
	    priv->missed_beacon_threshold) {
		IWL_DEBUG_CALIB(priv,
				"missed bcn cnsq %d totl %d rcd %d expctd %d\n",
				le32_to_cpu(missed_beacon->consecutive_missed_beacons),
				le32_to_cpu(missed_beacon->total_missed_becons),
				le32_to_cpu(missed_beacon->num_recvd_beacons),
				le32_to_cpu(missed_beacon->num_expected_beacons));
		if (!test_bit(STATUS_SCANNING, &priv->status))
			iwl_init_sensitivity(priv);
	}
}
/* Cache phy data (Rx signal strength, etc.) for HT frame (REPLY_RX_PHY_CMD).
 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	priv->_agn.last_phy_res_valid = true;
	memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
	       sizeof(struct iwl_rx_phy_res));
}
/*
 * returns non-zero if the packet should be dropped
 */
static int iwl_set_decrypted_flag(struct iwl_priv *priv,
				  struct ieee80211_hdr *hdr,
				  u32 decrypt_res,
				  struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
	    RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode got a bad phase 1 key, so it pushes the packet
		 * up anyway; decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through */

	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* Bad ICV: the packet is destroyed since the
			 * decryption is done in place, so drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;
	struct iwl_rxon_context *ctx;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
				     "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	iwl_update_stats(priv, false, fc, len);

	/*
	 * Wake any queues that were stopped due to a passive channel tx
	 * failure. This can happen because the regulatory enforcement in
	 * the device waits for a beacon before allowing transmission,
	 * sometimes even after already having transmitted frames for the
	 * association because the new RXON may reset the information.
	 */
	if (unlikely(ieee80211_is_beacon(fc))) {
		for_each_context(priv, ctx) {
			if (!ctx->last_tx_rejected)
				continue;
			if (compare_ether_addr(hdr->addr3,
					       ctx->active.bssid_addr))
				continue;
			ctx->last_tx_rejected = false;
			iwl_wake_any_queue(priv, ctx);
		}
	}

	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	rxb->page = NULL;
}
static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
		     decrypt_in, decrypt_out);

	return decrypt_out;
}
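
/*
 * Example of the translation above for a CCMP frame: decrypt_in with
 * RX_RES_STATUS_SEC_TYPE_CCMP, RX_MPDU_RES_STATUS_DEC_DONE_MSK and
 * RX_MPDU_RES_STATUS_MIC_OK all set comes out as SEC_TYPE_CCMP |
 * RX_RES_STATUS_DECRYPT_OK -- the same encoding REPLY_RX reports --
 * so iwl_set_decrypted_flag() can treat both paths identically.
 */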
/* Called for REPLY_RX (legacy ABG frames), or
 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
static void iwl_rx_reply_rx(struct iwl_priv *priv,
			    struct iwl_rx_mem_buffer *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct iwl_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/*
	 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
	 *	REPLY_RX: physical layer info is in this buffer
	 *	REPLY_RX_MPDU_CMD: physical layer info was sent in separate
	 *		command and cached in priv->_agn.last_phy_res
	 *
	 * Here we set up local variables depending on which command is
	 * received.
	 */
	if (pkt->hdr.cmd == REPLY_RX) {
		phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
				+ phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
				phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		if (!priv->_agn.last_phy_res_valid) {
			IWL_ERR(priv, "MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &priv->_agn.last_phy_res;
		amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
		ampdu_status = iwl_translate_rx_status(priv,
						       le32_to_cpu(rx_pkt_status));
	}

	if (unlikely(phy_res->cfg_phy_cnt > 20)) {
		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
			       phy_res->cfg_phy_cnt);
		return;
	}

	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
			     le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
			 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					       rx_status.band);
	rx_status.rate_idx =
		iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable.  In order to allow smooth user experience,
	 * this W/A doesn't propagate it to mac80211 */
	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/

	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);

	iwl_dbg_log_rx_data_frame(priv, len, header);
	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
			      rx_status.signal, (unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
		(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
		>> RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;

	iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
				    rxb, &rx_status);
}
/**
 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 */
void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
	void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);

	handlers = priv->rx_handlers;

	handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
	handlers[REPLY_ERROR] = iwl_rx_reply_error;
	handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
	handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
	handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
	handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
	handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * statistics request from the host as well as for the periodic
	 * statistics notifications (after received beacons) from the uCode.
	 */
	handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
	handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;

	iwl_setup_rx_scan_handlers(priv);

	handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
	handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;

	/* Rx handlers */
	handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
	handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;

	/* block ack */
	handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;

	/* Set up hardware specific Rx handlers */
	priv->cfg->ops->lib->rx_handler_setup(priv);
}