/* iwl-rx.c */
  1. /******************************************************************************
  2. *
  3. * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
  4. *
  5. * Portions of this file are derived from the ipw3945 project, as well
  6. * as portions of the ieee80211 subsystem header files.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of version 2 of the GNU General Public License as
  10. * published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful, but WITHOUT
  13. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  15. * more details.
  16. *
  17. * You should have received a copy of the GNU General Public License along with
  18. * this program; if not, write to the Free Software Foundation, Inc.,
  19. * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
  20. *
  21. * The full GNU General Public License is included in this distribution in the
  22. * file called LICENSE.
  23. *
  24. * Contact Information:
  25. * Intel Linux Wireless <ilw@linux.intel.com>
  26. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  27. *
  28. *****************************************************************************/
  29. #include <linux/etherdevice.h>
  30. #include <linux/slab.h>
  31. #include <linux/sched.h>
  32. #include <net/mac80211.h>
  33. #include <asm/unaligned.h>
  34. #include "iwl-eeprom.h"
  35. #include "iwl-dev.h"
  36. #include "iwl-core.h"
  37. #include "iwl-sta.h"
  38. #include "iwl-io.h"
  39. #include "iwl-helpers.h"
  40. #include "iwl-agn-calib.h"
  41. #include "iwl-agn.h"
  42. /******************************************************************************
  43. *
  44. * RX path functions
  45. *
  46. ******************************************************************************/
  47. /*
  48. * Rx theory of operation
  49. *
  50. * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
  51. * each of which point to Receive Buffers to be filled by the NIC. These get
  52. * used not only for Rx frames, but for any command response or notification
  53. * from the NIC. The driver and NIC manage the Rx buffers by means
  54. * of indexes into the circular buffer.
  55. *
  56. * Rx Queue Indexes
  57. * The host/firmware share two index registers for managing the Rx buffers.
  58. *
  59. * The READ index maps to the first position that the firmware may be writing
  60. * to -- the driver can read up to (but not including) this position and get
  61. * good data.
  62. * The READ index is managed by the firmware once the card is enabled.
  63. *
  64. * The WRITE index maps to the last position the driver has read from -- the
  65. * position preceding WRITE is the last slot the firmware can place a packet.
  66. *
  67. * The queue is empty (no good data) if WRITE = READ - 1, and is full if
  68. * WRITE = READ.
  69. *
  70. * During initialization, the host sets up the READ queue position to the first
  71. * INDEX position, and WRITE to the last (READ - 1 wrapped)
  72. *
  73. * When the firmware places a packet in a buffer, it will advance the READ index
  74. * and fire the RX interrupt. The driver can then query the READ index and
  75. * process as many packets as possible, moving the WRITE index forward as it
  76. * resets the Rx queue buffers with new memory.
  77. *
  78. * The management in the driver is as follows:
  79. * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
  80. * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
  81. * to replenish the iwl->rxq->rx_free.
  82. * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
  83. * iwl->rxq is replenished and the READ INDEX is updated (updating the
  84. * 'processed' and 'read' driver indexes as well)
  85. * + A received packet is processed and handed to the kernel network stack,
  86. * detached from the iwl->rxq. The driver 'processed' index is updated.
  87. * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
  88. * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
  89. * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
  90. * were enough free buffers and RX_STALLED is set it is cleared.
  91. *
  92. *
  93. * Driver sequence:
  94. *
  95. * iwl_rx_queue_alloc() Allocates rx_free
  96. * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
  97. * iwl_rx_queue_restock
  98. * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
  99. * queue, updates firmware pointers, and updates
  100. * the WRITE index. If insufficient rx_free buffers
  101. * are available, schedules iwl_rx_replenish
  102. *
  103. * -- enable interrupts --
  104. * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
  105. * READ INDEX, detaching the SKB from the pool.
  106. * Moves the packet buffer from queue to rx_used.
  107. * Calls iwl_rx_queue_restock to refill any empty
  108. * slots.
  109. * ...
  110. *
  111. */
  112. /**
  113. * iwl_rx_queue_space - Return number of free slots available in queue.
  114. */
  115. int iwl_rx_queue_space(const struct iwl_rx_queue *q)
  116. {
  117. int s = q->read - q->write;
  118. if (s <= 0)
  119. s += RX_QUEUE_SIZE;
  120. /* keep some buffer to not confuse full and empty queue */
  121. s -= 2;
  122. if (s < 0)
  123. s = 0;
  124. return s;
  125. }
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Tells the device how far the driver has refilled the RX ring by writing
 * q->write (rounded down to a multiple of 8, as the device requires) into
 * the RX write-pointer register.  No-op unless q->need_update is set.
 * All register access happens under q->lock with IRQs disabled.
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(priv,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				/* need_update stays set, so the write is
				 * retried once the device has woken up */
				goto exit_unlock;
			}

			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);
		}
	}
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
  169. int iwl_rx_queue_alloc(struct iwl_priv *priv)
  170. {
  171. struct iwl_rx_queue *rxq = &priv->rxq;
  172. struct device *dev = &priv->pci_dev->dev;
  173. int i;
  174. spin_lock_init(&rxq->lock);
  175. INIT_LIST_HEAD(&rxq->rx_free);
  176. INIT_LIST_HEAD(&rxq->rx_used);
  177. /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
  178. rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
  179. GFP_KERNEL);
  180. if (!rxq->bd)
  181. goto err_bd;
  182. rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
  183. &rxq->rb_stts_dma, GFP_KERNEL);
  184. if (!rxq->rb_stts)
  185. goto err_rb;
  186. /* Fill the rx_used queue with _all_ of the Rx buffers */
  187. for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
  188. list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
  189. /* Set us so that we have processed and used all buffers, but have
  190. * not restocked the Rx queue with fresh buffers */
  191. rxq->read = rxq->write = 0;
  192. rxq->write_actual = 0;
  193. rxq->free_count = 0;
  194. rxq->need_update = 0;
  195. return 0;
  196. err_rb:
  197. dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
  198. rxq->bd_dma);
  199. err_bd:
  200. return -ENOMEM;
  201. }
  202. /******************************************************************************
  203. *
  204. * Generic RX handler implementations
  205. *
  206. ******************************************************************************/
/* Log an error response (REPLY_ERROR) received from the uCode. */
static void iwl_rx_reply_error(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
  219. static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
  220. {
  221. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  222. struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
  223. /*
  224. * MULTI-FIXME
  225. * See iwl_mac_channel_switch.
  226. */
  227. struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
  228. struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
  229. if (priv->switch_rxon.switch_in_progress) {
  230. if (!le32_to_cpu(csa->status) &&
  231. (csa->channel == priv->switch_rxon.channel)) {
  232. rxon->channel = csa->channel;
  233. ctx->staging.channel = csa->channel;
  234. IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
  235. le16_to_cpu(csa->channel));
  236. iwl_chswitch_done(priv, true);
  237. } else {
  238. IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
  239. le16_to_cpu(csa->channel));
  240. iwl_chswitch_done(priv, false);
  241. }
  242. }
  243. }
  244. static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
  245. struct iwl_rx_mem_buffer *rxb)
  246. {
  247. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  248. struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
  249. if (!report->state) {
  250. IWL_DEBUG_11H(priv,
  251. "Spectrum Measure Notification: Start\n");
  252. return;
  253. }
  254. memcpy(&priv->measure_report, report, sizeof(*report));
  255. priv->measurement_status |= MEASUREMENT_READY;
  256. }
/* Log the uCode's sleep-mode notification (compiled out unless
 * CONFIG_IWLWIFI_DEBUG is enabled). */
static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
  267. static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
  268. struct iwl_rx_mem_buffer *rxb)
  269. {
  270. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  271. u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
  272. IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
  273. "notification for %s:\n", len,
  274. get_cmd_string(pkt->hdr.cmd));
  275. iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
  276. }
/* Process a beacon (TX) notification from the uCode: record the IBSS
 * manager status and schedule a beacon update unless shutting down. */
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
	u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
		"tsf:0x%.8x%.8x rate:%d\n",
		status & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif
	/* remember whether we are currently the IBSS beacon manager */
	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
	if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
		queue_work(priv->workqueue, &priv->beacon_update);
}
  297. /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
  298. #define ACK_CNT_RATIO (50)
  299. #define BA_TIMEOUT_CNT (5)
  300. #define BA_TIMEOUT_MAX (16)
/**
 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
 *
 * When the ACK count ratio is low and aggregated BA timeout retries exceeding
 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
 * operation state.
 *
 * Returns false only when the link is deemed unhealthy enough to warrant
 * a firmware reload; true means "healthy" or "cannot judge".
 */
static bool iwl_good_ack_health(struct iwl_priv *priv,
				struct statistics_tx *cur)
{
	int actual_delta, expected_delta, ba_timeout_delta;
	struct statistics_tx *old;

	/* skip the check while any TID aggregation session is active;
	 * aggregation skews the plain ACK counters */
	if (priv->_agn.agg_tids_count)
		return true;

	old = &priv->statistics.tx;

	/* counter growth since the previous statistics sample */
	actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
		       le32_to_cpu(old->actual_ack_cnt);
	expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
			 le32_to_cpu(old->expected_ack_cnt);

	/* Values should not be negative, but we do not trust the firmware */
	if (actual_delta <= 0 || expected_delta <= 0)
		return true;

	ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
			   le32_to_cpu(old->agg.ba_timeout);

	if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
	    ba_timeout_delta > BA_TIMEOUT_CNT) {
		IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
				actual_delta, expected_delta, ba_timeout_delta);

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/*
		 * This is ifdef'ed on DEBUGFS because otherwise the
		 * statistics aren't available. If DEBUGFS is set but
		 * DEBUG is not, these will just compile out.
		 */
		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
				priv->delta_stats.tx.rx_detected_cnt);
		IWL_DEBUG_RADIO(priv,
				"ack_or_ba_timeout_collision delta %d\n",
				priv->delta_stats.tx.ack_or_ba_timeout_collision);
#endif

		/* only declare the link bad once BA timeouts pile up */
		if (ba_timeout_delta >= BA_TIMEOUT_MAX)
			return false;
	}
	return true;
}
  346. /**
  347. * iwl_good_plcp_health - checks for plcp error.
  348. *
  349. * When the plcp error is exceeding the thresholds, reset the radio
  350. * to improve the throughput.
  351. */
  352. static bool iwl_good_plcp_health(struct iwl_priv *priv,
  353. struct statistics_rx_phy *cur_ofdm,
  354. struct statistics_rx_ht_phy *cur_ofdm_ht,
  355. unsigned int msecs)
  356. {
  357. int delta;
  358. int threshold = priv->cfg->base_params->plcp_delta_threshold;
  359. if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
  360. IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
  361. return true;
  362. }
  363. delta = le32_to_cpu(cur_ofdm->plcp_err) -
  364. le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) +
  365. le32_to_cpu(cur_ofdm_ht->plcp_err) -
  366. le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err);
  367. /* Can be negative if firmware reset statistics */
  368. if (delta <= 0)
  369. return true;
  370. if ((delta * 100 / msecs) > threshold) {
  371. IWL_DEBUG_RADIO(priv,
  372. "plcp health threshold %u delta %d msecs %u\n",
  373. threshold, delta, msecs);
  374. return false;
  375. }
  376. return true;
  377. }
/*
 * iwl_recover_from_statistics - health watchdog driven by statistics.
 *
 * Inspects the newest TX/PLCP statistics and, when the corresponding
 * module-parameter checks are enabled, forces a firmware reload (bad ACK
 * ratio) or an RF reset (excessive PLCP errors).
 */
static void iwl_recover_from_statistics(struct iwl_priv *priv,
					struct statistics_rx_phy *cur_ofdm,
					struct statistics_rx_ht_phy *cur_ofdm_ht,
					struct statistics_tx *tx,
					unsigned long stamp)
{
	unsigned int msecs;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);

	/* only run the health checks while associated; bail otherwise */
	if (!iwl_is_any_associated(priv))
		return;

	/* Do not check/recover when do not have enough statistics data */
	if (msecs < 99)
		return;

	if (iwlagn_mod_params.ack_check && !iwl_good_ack_health(priv, tx)) {
		IWL_ERR(priv, "low ack count detected, restart firmware\n");
		/* NOTE(review): assumes iwl_force_reset() returns 0 when the
		 * reset was actually performed — confirm against iwl-core */
		if (!iwl_force_reset(priv, IWL_FW_RESET, false))
			return;
	}

	if (iwlagn_mod_params.plcp_check &&
	    !iwl_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
		iwl_force_reset(priv, IWL_RF_RESET, false);
}
  403. /* Calculate noise level, based on measurements during network silence just
  404. * before arriving beacon. This measurement can be done only if we know
  405. * exactly when to expect beacons, therefore only when we're associated. */
  406. static void iwl_rx_calc_noise(struct iwl_priv *priv)
  407. {
  408. struct statistics_rx_non_phy *rx_info;
  409. int num_active_rx = 0;
  410. int total_silence = 0;
  411. int bcn_silence_a, bcn_silence_b, bcn_silence_c;
  412. int last_rx_noise;
  413. rx_info = &priv->statistics.rx_non_phy;
  414. bcn_silence_a =
  415. le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
  416. bcn_silence_b =
  417. le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
  418. bcn_silence_c =
  419. le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
  420. if (bcn_silence_a) {
  421. total_silence += bcn_silence_a;
  422. num_active_rx++;
  423. }
  424. if (bcn_silence_b) {
  425. total_silence += bcn_silence_b;
  426. num_active_rx++;
  427. }
  428. if (bcn_silence_c) {
  429. total_silence += bcn_silence_c;
  430. num_active_rx++;
  431. }
  432. /* Average among active antennas */
  433. if (num_active_rx)
  434. last_rx_noise = (total_silence / num_active_rx) - 107;
  435. else
  436. last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
  437. IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
  438. bcn_silence_a, bcn_silence_b, bcn_silence_c,
  439. last_rx_noise);
  440. }
  441. #ifdef CONFIG_IWLWIFI_DEBUGFS
  442. /*
  443. * based on the assumption of all statistics counter are in DWORD
  444. * FIXME: This function is for debugging, do not deal with
  445. * the case of counters roll-over.
  446. */
  447. static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
  448. __le32 *max_delta, __le32 *accum, int size)
  449. {
  450. int i;
  451. for (i = 0;
  452. i < size / sizeof(__le32);
  453. i++, prev++, cur++, delta++, max_delta++, accum++) {
  454. if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
  455. *delta = cpu_to_le32(
  456. le32_to_cpu(*cur) - le32_to_cpu(*prev));
  457. le32_add_cpu(accum, le32_to_cpu(*delta));
  458. if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta))
  459. *max_delta = *delta;
  460. }
  461. }
  462. }
/*
 * iwl_accumulative_statistics - fold the latest statistics sample into
 * the debugfs delta / max-delta / accumulated counter mirrors.
 *
 * Each ACCUM(_name) treats priv->statistics._name and the new sample as
 * flat arrays of __le32 counters and feeds them to accum_stats().
 * bt_activity may be NULL (non-BT statistics notification).
 */
static void
iwl_accumulative_statistics(struct iwl_priv *priv,
			    struct statistics_general_common *common,
			    struct statistics_rx_non_phy *rx_non_phy,
			    struct statistics_rx_phy *rx_ofdm,
			    struct statistics_rx_ht_phy *rx_ofdm_ht,
			    struct statistics_rx_phy *rx_cck,
			    struct statistics_tx *tx,
			    struct statistics_bt_activity *bt_activity)
{
#define ACCUM(_name)	\
	accum_stats((__le32 *)&priv->statistics._name,		\
		    (__le32 *)_name,				\
		    (__le32 *)&priv->delta_stats._name,		\
		    (__le32 *)&priv->max_delta_stats._name,	\
		    (__le32 *)&priv->accum_stats._name,		\
		    sizeof(*_name));
	ACCUM(common);
	ACCUM(rx_non_phy);
	ACCUM(rx_ofdm);
	ACCUM(rx_ofdm_ht);
	ACCUM(rx_cck);
	ACCUM(tx);
	if (bt_activity)
		ACCUM(bt_activity);
#undef ACCUM
}
  490. #else
/* Without CONFIG_IWLWIFI_DEBUGFS the accumulated statistics are not
 * kept, so this stub with the same signature compiles to nothing. */
static inline void
iwl_accumulative_statistics(struct iwl_priv *priv,
			    struct statistics_general_common *common,
			    struct statistics_rx_non_phy *rx_non_phy,
			    struct statistics_rx_phy *rx_ofdm,
			    struct statistics_rx_ht_phy *rx_ofdm_ht,
			    struct statistics_rx_phy *rx_cck,
			    struct statistics_tx *tx,
			    struct statistics_bt_activity *bt_activity)
{
}
  502. #endif
/*
 * iwl_rx_statistics - handle a statistics notification from the uCode.
 *
 * Validates the notification length (BT-combined vs. normal layout),
 * points local views at the embedded sub-structures, accumulates the
 * debugfs counters, runs the recovery health checks, then snapshots
 * everything into priv->statistics.  Also kicks noise calibration and
 * the temperature callback when appropriate.
 */
static void iwl_rx_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	unsigned long stamp = jiffies;
	const int reg_recalib_period = 60;	/* seconds */
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	__le32 *flag;
	struct statistics_general_common *common;
	struct statistics_rx_non_phy *rx_non_phy;
	struct statistics_rx_phy *rx_ofdm;
	struct statistics_rx_ht_phy *rx_ofdm_ht;
	struct statistics_rx_phy *rx_cck;
	struct statistics_tx *tx;
	struct statistics_bt_activity *bt_activity;

	len -= sizeof(struct iwl_cmd_header); /* skip header */

	IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
		     len);

	/* dispatch on payload length: BT-combined vs. normal statistics */
	if (len == sizeof(struct iwl_bt_notif_statistics)) {
		struct iwl_bt_notif_statistics *stats;
		stats = &pkt->u.stats_bt;
		flag = &stats->flag;
		common = &stats->general.common;
		rx_non_phy = &stats->rx.general.common;
		rx_ofdm = &stats->rx.ofdm;
		rx_ofdm_ht = &stats->rx.ofdm_ht;
		rx_cck = &stats->rx.cck;
		tx = &stats->tx;
		bt_activity = &stats->general.activity;
#ifdef CONFIG_IWLWIFI_DEBUGFS
		/* handle this exception directly */
		priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills;
		le32_add_cpu(&priv->statistics.accum_num_bt_kills,
			     le32_to_cpu(stats->rx.general.num_bt_kills));
#endif
	} else if (len == sizeof(struct iwl_notif_statistics)) {
		struct iwl_notif_statistics *stats;
		stats = &pkt->u.stats;
		flag = &stats->flag;
		common = &stats->general.common;
		rx_non_phy = &stats->rx.general;
		rx_ofdm = &stats->rx.ofdm;
		rx_ofdm_ht = &stats->rx.ofdm_ht;
		rx_cck = &stats->rx.cck;
		tx = &stats->tx;
		bt_activity = NULL;
	} else {
		WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
			  len, sizeof(struct iwl_bt_notif_statistics),
			  sizeof(struct iwl_notif_statistics));
		return;
	}

	/* did temperature or HT40 mode change since the last snapshot? */
	change = common->temperature != priv->statistics.common.temperature ||
		 (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
		 (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);

	iwl_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
				    rx_ofdm_ht, rx_cck, tx, bt_activity);

	iwl_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);

	/* snapshot the new sample as the current statistics */
	priv->statistics.flag = *flag;
	memcpy(&priv->statistics.common, common, sizeof(*common));
	memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy));
	memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm));
	memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht));
	memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck));
	memcpy(&priv->statistics.tx, tx, sizeof(*tx));
#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (bt_activity)
		memcpy(&priv->statistics.bt_activity, bt_activity,
		       sizeof(*bt_activity));
#endif

	priv->rx_statistics_jiffies = stamp;

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * reg_recalib_period seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(reg_recalib_period * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}

	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
}
  590. static void iwl_rx_reply_statistics(struct iwl_priv *priv,
  591. struct iwl_rx_mem_buffer *rxb)
  592. {
  593. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  594. if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
  595. #ifdef CONFIG_IWLWIFI_DEBUGFS
  596. memset(&priv->accum_stats, 0,
  597. sizeof(priv->accum_stats));
  598. memset(&priv->delta_stats, 0,
  599. sizeof(priv->delta_stats));
  600. memset(&priv->max_delta_stats, 0,
  601. sizeof(priv->max_delta_stats));
  602. #endif
  603. IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
  604. }
  605. iwl_rx_statistics(priv, rxb);
  606. }
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	unsigned long status = priv->status;	/* snapshot for edge detection */

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {
		/* block the command queue while any kill reason is active */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv, HBUS_TARG_MBX_C,
				   HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			/* RXON still allowed: unblock again */
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					   HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
		if (flags & CT_CARD_DISABLED)
			iwl_tt_enter_ct_kill(priv);
	}
	if (!(flags & CT_CARD_DISABLED))
		iwl_tt_exit_ct_kill(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	/* report HW rfkill to mac80211 only when the state changed;
	 * otherwise wake anyone blocked on the command queue */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up_interruptible(&priv->wait_command_queue);
}
  650. static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
  651. struct iwl_rx_mem_buffer *rxb)
  652. {
  653. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  654. struct iwl_missed_beacon_notif *missed_beacon;
  655. missed_beacon = &pkt->u.missed_beacon;
  656. if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
  657. priv->missed_beacon_threshold) {
  658. IWL_DEBUG_CALIB(priv,
  659. "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
  660. le32_to_cpu(missed_beacon->consecutive_missed_beacons),
  661. le32_to_cpu(missed_beacon->total_missed_becons),
  662. le32_to_cpu(missed_beacon->num_recvd_beacons),
  663. le32_to_cpu(missed_beacon->num_expected_beacons));
  664. if (!test_bit(STATUS_SCANNING, &priv->status))
  665. iwl_init_sensitivity(priv);
  666. }
  667. }
  668. /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
  669. * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
  670. static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
  671. struct iwl_rx_mem_buffer *rxb)
  672. {
  673. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  674. priv->_agn.last_phy_res_valid = true;
  675. memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
  676. sizeof(struct iwl_rx_phy_res));
  677. }
/*
 * returns non-zero if packet should be dropped
 */
static int iwl_set_decrypted_flag(struct iwl_priv *priv,
				  struct ieee80211_hdr *hdr,
				  u32 decrypt_res,
				  struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
	    RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* unprotected frames need no decrypt bookkeeping */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through - TKIP without a TTAK failure shares the
		 * ICV/MIC check with WEP */
	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through - check whether HW decryption succeeded */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;
	default:
		break;
	}
	return 0;
}
/*
 * iwl_pass_packet_to_mac80211 - hand a received frame up to mac80211
 *
 * Builds a zero-copy skb that references the RX page and delivers it via
 * ieee80211_rx(). On success, ownership of rxb->page passes to the skb,
 * so the caller must not recycle the page (rxb->page is set to NULL).
 * Drops the frame silently if the interface is closed or if HW crypto
 * reports the payload was destroyed during in-place decryption.
 */
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;
	struct iwl_rxon_context *ctx;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!iwlagn_mod_params.sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	/* Small skb head; the payload is attached as a page fragment below. */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	/* Zero-copy: point the skb at the frame inside the RX page. */
	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	iwl_update_stats(priv, false, fc, len);

	/*
	 * Wake any queues that were stopped due to a passive channel tx
	 * failure. This can happen because the regulatory enforcement in
	 * the device waits for a beacon before allowing transmission,
	 * sometimes even after already having transmitted frames for the
	 * association because the new RXON may reset the information.
	 */
	if (unlikely(ieee80211_is_beacon(fc))) {
		for_each_context(priv, ctx) {
			if (!ctx->last_tx_rejected)
				continue;
			/* Only our own BSS's beacon re-enables TX. */
			if (compare_ether_addr(hdr->addr3,
					       ctx->active.bssid_addr))
				continue;
			ctx->last_tx_rejected = false;
			iwl_wake_any_queue(priv, ctx);
		}
	}

	/* mac80211 reads the RX status from the skb control block. */
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	/* Page now belongs to the skb; prevent the RX queue from reusing it. */
	rxb->page = NULL;
}
  773. static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
  774. {
  775. u32 decrypt_out = 0;
  776. if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
  777. RX_RES_STATUS_STATION_FOUND)
  778. decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
  779. RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
  780. decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
  781. /* packet was not encrypted */
  782. if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
  783. RX_RES_STATUS_SEC_TYPE_NONE)
  784. return decrypt_out;
  785. /* packet was encrypted with unknown alg */
  786. if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
  787. RX_RES_STATUS_SEC_TYPE_ERR)
  788. return decrypt_out;
  789. /* decryption was not done in HW */
  790. if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
  791. RX_MPDU_RES_STATUS_DEC_DONE_MSK)
  792. return decrypt_out;
  793. switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
  794. case RX_RES_STATUS_SEC_TYPE_CCMP:
  795. /* alg is CCM: check MIC only */
  796. if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
  797. /* Bad MIC */
  798. decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
  799. else
  800. decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
  801. break;
  802. case RX_RES_STATUS_SEC_TYPE_TKIP:
  803. if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
  804. /* Bad TTAK */
  805. decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
  806. break;
  807. }
  808. /* fall through if TTAK OK */
  809. default:
  810. if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
  811. decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
  812. else
  813. decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
  814. break;
  815. }
  816. IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
  817. decrypt_in, decrypt_out);
  818. return decrypt_out;
  819. }
  820. /* Called for REPLY_RX (legacy ABG frames), or
  821. * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
  822. static void iwl_rx_reply_rx(struct iwl_priv *priv,
  823. struct iwl_rx_mem_buffer *rxb)
  824. {
  825. struct ieee80211_hdr *header;
  826. struct ieee80211_rx_status rx_status;
  827. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  828. struct iwl_rx_phy_res *phy_res;
  829. __le32 rx_pkt_status;
  830. struct iwl_rx_mpdu_res_start *amsdu;
  831. u32 len;
  832. u32 ampdu_status;
  833. u32 rate_n_flags;
  834. /**
  835. * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
  836. * REPLY_RX: physical layer info is in this buffer
  837. * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
  838. * command and cached in priv->last_phy_res
  839. *
  840. * Here we set up local variables depending on which command is
  841. * received.
  842. */
  843. if (pkt->hdr.cmd == REPLY_RX) {
  844. phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
  845. header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
  846. + phy_res->cfg_phy_cnt);
  847. len = le16_to_cpu(phy_res->byte_count);
  848. rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
  849. phy_res->cfg_phy_cnt + len);
  850. ampdu_status = le32_to_cpu(rx_pkt_status);
  851. } else {
  852. if (!priv->_agn.last_phy_res_valid) {
  853. IWL_ERR(priv, "MPDU frame without cached PHY data\n");
  854. return;
  855. }
  856. phy_res = &priv->_agn.last_phy_res;
  857. amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
  858. header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
  859. len = le16_to_cpu(amsdu->byte_count);
  860. rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
  861. ampdu_status = iwl_translate_rx_status(priv,
  862. le32_to_cpu(rx_pkt_status));
  863. }
  864. if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
  865. IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
  866. phy_res->cfg_phy_cnt);
  867. return;
  868. }
  869. if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
  870. !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
  871. IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
  872. le32_to_cpu(rx_pkt_status));
  873. return;
  874. }
  875. /* This will be used in several places later */
  876. rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
  877. /* rx_status carries information about the packet to mac80211 */
  878. rx_status.mactime = le64_to_cpu(phy_res->timestamp);
  879. rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
  880. IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
  881. rx_status.freq =
  882. ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
  883. rx_status.band);
  884. rx_status.rate_idx =
  885. iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
  886. rx_status.flag = 0;
  887. /* TSF isn't reliable. In order to allow smooth user experience,
  888. * this W/A doesn't propagate it to the mac80211 */
  889. /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
  890. priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
  891. /* Find max signal strength (dBm) among 3 antenna/receiver chains */
  892. rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
  893. iwl_dbg_log_rx_data_frame(priv, len, header);
  894. IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
  895. rx_status.signal, (unsigned long long)rx_status.mactime);
  896. /*
  897. * "antenna number"
  898. *
  899. * It seems that the antenna field in the phy flags value
  900. * is actually a bit field. This is undefined by radiotap,
  901. * it wants an actual antenna number but I always get "7"
  902. * for most legacy frames I receive indicating that the
  903. * same frame was received on all three RX chains.
  904. *
  905. * I think this field should be removed in favor of a
  906. * new 802.11n radiotap field "RX chains" that is defined
  907. * as a bitmask.
  908. */
  909. rx_status.antenna =
  910. (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
  911. >> RX_RES_PHY_FLAGS_ANTENNA_POS;
  912. /* set the preamble flag if appropriate */
  913. if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
  914. rx_status.flag |= RX_FLAG_SHORTPRE;
  915. /* Set up the HT phy flags */
  916. if (rate_n_flags & RATE_MCS_HT_MSK)
  917. rx_status.flag |= RX_FLAG_HT;
  918. if (rate_n_flags & RATE_MCS_HT40_MSK)
  919. rx_status.flag |= RX_FLAG_40MHZ;
  920. if (rate_n_flags & RATE_MCS_SGI_MSK)
  921. rx_status.flag |= RX_FLAG_SHORT_GI;
  922. iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
  923. rxb, &rx_status);
  924. }
  925. /**
  926. * iwl_setup_rx_handlers - Initialize Rx handler callbacks
  927. *
  928. * Setup the RX handlers for each of the reply types sent from the uCode
  929. * to the host.
  930. */
  931. void iwl_setup_rx_handlers(struct iwl_priv *priv)
  932. {
  933. void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
  934. handlers = priv->rx_handlers;
  935. handlers[REPLY_ERROR] = iwl_rx_reply_error;
  936. handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
  937. handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
  938. handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
  939. handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
  940. handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
  941. /*
  942. * The same handler is used for both the REPLY to a discrete
  943. * statistics request from the host as well as for the periodic
  944. * statistics notifications (after received beacons) from the uCode.
  945. */
  946. handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
  947. handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
  948. iwl_setup_rx_scan_handlers(priv);
  949. handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
  950. handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
  951. /* Rx handlers */
  952. handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
  953. handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
  954. /* block ack */
  955. handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
  956. /* Set up hardware specific Rx handlers */
  957. priv->cfg->ops->lib->rx_handler_setup(priv);
  958. }