/* iwl-rx.c */
  1. /******************************************************************************
  2. *
  3. * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
  4. *
  5. * Portions of this file are derived from the ipw3945 project, as well
  6. * as portions of the ieee80211 subsystem header files.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of version 2 of the GNU General Public License as
  10. * published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful, but WITHOUT
  13. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  15. * more details.
  16. *
  17. * You should have received a copy of the GNU General Public License along with
  18. * this program; if not, write to the Free Software Foundation, Inc.,
  19. * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
  20. *
  21. * The full GNU General Public License is included in this distribution in the
  22. * file called LICENSE.
  23. *
  24. * Contact Information:
  25. * Intel Linux Wireless <ilw@linux.intel.com>
  26. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  27. *
  28. *****************************************************************************/
  29. #include <linux/etherdevice.h>
  30. #include <linux/slab.h>
  31. #include <linux/sched.h>
  32. #include <net/mac80211.h>
  33. #include <asm/unaligned.h>
  34. #include "iwl-eeprom.h"
  35. #include "iwl-dev.h"
  36. #include "iwl-core.h"
  37. #include "iwl-sta.h"
  38. #include "iwl-io.h"
  39. #include "iwl-helpers.h"
  40. #include "iwl-agn-calib.h"
  41. #include "iwl-agn.h"
  42. /******************************************************************************
  43. *
  44. * RX path functions
  45. *
  46. ******************************************************************************/
  47. /*
  48. * Rx theory of operation
  49. *
  50. * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
  51. * each of which point to Receive Buffers to be filled by the NIC. These get
  52. * used not only for Rx frames, but for any command response or notification
  53. * from the NIC. The driver and NIC manage the Rx buffers by means
  54. * of indexes into the circular buffer.
  55. *
  56. * Rx Queue Indexes
  57. * The host/firmware share two index registers for managing the Rx buffers.
  58. *
  59. * The READ index maps to the first position that the firmware may be writing
  60. * to -- the driver can read up to (but not including) this position and get
  61. * good data.
  62. * The READ index is managed by the firmware once the card is enabled.
  63. *
  64. * The WRITE index maps to the last position the driver has read from -- the
  65. * position preceding WRITE is the last slot the firmware can place a packet.
  66. *
  67. * The queue is empty (no good data) if WRITE = READ - 1, and is full if
  68. * WRITE = READ.
  69. *
  70. * During initialization, the host sets up the READ queue position to the first
  71. * INDEX position, and WRITE to the last (READ - 1 wrapped)
  72. *
  73. * When the firmware places a packet in a buffer, it will advance the READ index
  74. * and fire the RX interrupt. The driver can then query the READ index and
  75. * process as many packets as possible, moving the WRITE index forward as it
  76. * resets the Rx queue buffers with new memory.
  77. *
  78. * The management in the driver is as follows:
  79. * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
  80. * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
  81. * to replenish the iwl->rxq->rx_free.
  82. * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
  83. * iwl->rxq is replenished and the READ INDEX is updated (updating the
  84. * 'processed' and 'read' driver indexes as well)
  85. * + A received packet is processed and handed to the kernel network stack,
  86. * detached from the iwl->rxq. The driver 'processed' index is updated.
  87. * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
  88. * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
  89. * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
  90. * were enough free buffers and RX_STALLED is set it is cleared.
  91. *
  92. *
  93. * Driver sequence:
  94. *
  95. * iwl_rx_queue_alloc() Allocates rx_free
  96. * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
  97. * iwl_rx_queue_restock
  98. * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
  99. * queue, updates firmware pointers, and updates
  100. * the WRITE index. If insufficient rx_free buffers
  101. * are available, schedules iwl_rx_replenish
  102. *
  103. * -- enable interrupts --
  104. * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
  105. * READ INDEX, detaching the SKB from the pool.
  106. * Moves the packet buffer from queue to rx_used.
  107. * Calls iwl_rx_queue_restock to refill any empty
  108. * slots.
  109. * ...
  110. *
  111. */
  112. /**
  113. * iwl_rx_queue_space - Return number of free slots available in queue.
  114. */
  115. int iwl_rx_queue_space(const struct iwl_rx_queue *q)
  116. {
  117. int s = q->read - q->write;
  118. if (s <= 0)
  119. s += RX_QUEUE_SIZE;
  120. /* keep some buffer to not confuse full and empty queue */
  121. s -= 2;
  122. if (s < 0)
  123. s = 0;
  124. return s;
  125. }
/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Tells the device how far the driver has advanced (q->write), so the
 * firmware knows which RBDs it may fill.  The device is only told a value
 * rounded down to a multiple of 8 (q->write_actual).  If the device may be
 * asleep (power-save), a wakeup is requested instead and q->need_update is
 * left set so a later call retries the write.
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	/* Nothing to do unless someone marked the queue dirty. */
	if (q->need_update == 0)
		goto exit_unlock;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
	} else {
		/* If power-saving is in use, make sure device is awake */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				/* Device is asleep: request MAC access and
				 * bail out WITHOUT clearing need_update, so
				 * the pointer write is retried later. */
				IWL_DEBUG_INFO(priv,
					"Rx queue requesting wakeup,"
					" GP1 = 0x%x\n", reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				goto exit_unlock;
			}

			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);

		/* Else device is assumed to be awake */
		} else {
			/* Device expects a multiple of 8 */
			q->write_actual = (q->write & ~0x7);
			iwl_write_direct32(priv, rx_wrt_ptr_reg,
					q->write_actual);
		}
	}
	/* Pointer reached the hardware; queue is clean again. */
	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
/**
 * iwl_rx_queue_alloc - Allocate the Rx queue's DMA memory and init its state
 *
 * Allocates the circular RBD array and the rb_stts status area as DMA
 * coherent memory, puts every buffer from the pool on the rx_used list,
 * and resets the read/write indexes.
 *
 * Returns 0 on success, -ENOMEM if either DMA allocation fails (on the
 * second failure the first allocation is freed via the goto ladder).
 */
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct device *dev = &priv->pci_dev->dev;
	int i;

	spin_lock_init(&rxq->lock);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs).
	 * NOTE(review): size is 4 bytes per RX_QUEUE_SIZE entry —
	 * presumably each RBD is a 32-bit DMA address; confirm against
	 * the RBD definition in the headers. */
	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
				     GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;

	/* Status area the device writes its closed-RBD index into. */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	rxq->need_update = 0;
	return 0;

err_rb:
	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
err_bd:
	return -ENOMEM;
}
  202. /******************************************************************************
  203. *
  204. * Generic RX handler implementations
  205. *
  206. ******************************************************************************/
/*
 * iwl_rx_reply_alive - handle the uCode ALIVE notification
 *
 * Records the device's event/error log table pointers, then schedules the
 * appropriate "alive start" work (init vs runtime image).  If the uCode did
 * not report a valid status, a restart is queued instead.
 */
static void iwl_rx_reply_alive(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	/* Remember where the device keeps its logs for later dumping. */
	priv->device_pointers.log_event_table =
		le32_to_cpu(palive->log_event_table_ptr);
	priv->device_pointers.error_event_table =
		le32_to_cpu(palive->error_event_table_ptr);

	/* Pick the follow-up work item based on which image came alive. */
	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
		pwork = &priv->init_alive_start;
	} else {
		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
		pwork = &priv->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(priv->workqueue, pwork,
				   msecs_to_jiffies(5));
	else {
		IWL_WARN(priv, "%s uCode did not respond OK.\n",
			 (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
			 "init" : "runtime");
		/*
		 * If fail to load init uCode,
		 * let's try to load the init uCode again.
		 * We should not get into this situation, but if it
		 * does happen, we should not move on and loading "runtime"
		 * without proper calibrate the device.
		 */
		if (palive->ver_subtype == INITIALIZE_SUBTYPE)
			priv->ucode_type = UCODE_NONE;
		queue_work(priv->workqueue, &priv->restart);
	}
}
  250. static void iwl_rx_reply_error(struct iwl_priv *priv,
  251. struct iwl_rx_mem_buffer *rxb)
  252. {
  253. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  254. IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
  255. "seq 0x%04X ser 0x%08X\n",
  256. le32_to_cpu(pkt->u.err_resp.error_type),
  257. get_cmd_string(pkt->u.err_resp.cmd_id),
  258. pkt->u.err_resp.cmd_id,
  259. le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
  260. le32_to_cpu(pkt->u.err_resp.error_info));
  261. }
/*
 * iwl_rx_csa - handle the uCode's channel-switch-announcement notification
 *
 * Completes a driver-initiated channel switch: on success (status clear and
 * the reported channel matches the one we requested) the active and staging
 * RXON channels are updated; otherwise the switch is reported as failed.
 * Notifications that arrive with no switch in progress are ignored.
 */
static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
	/*
	 * MULTI-FIXME
	 * See iwl_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;

	if (priv->switch_rxon.switch_in_progress) {
		/* Both channels are little-endian; compared raw on purpose. */
		if (!le32_to_cpu(csa->status) &&
		    (csa->channel == priv->switch_rxon.channel)) {
			rxon->channel = csa->channel;
			ctx->staging.channel = csa->channel;
			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
				      le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, true);
		} else {
			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
				le16_to_cpu(csa->channel));
			iwl_chswitch_done(priv, false);
		}
	}
}
  287. static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
  288. struct iwl_rx_mem_buffer *rxb)
  289. {
  290. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  291. struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
  292. if (!report->state) {
  293. IWL_DEBUG_11H(priv,
  294. "Spectrum Measure Notification: Start\n");
  295. return;
  296. }
  297. memcpy(&priv->measure_report, report, sizeof(*report));
  298. priv->measurement_status |= MEASUREMENT_READY;
  299. }
/* Debug-only handler: log the uCode's sleep-mode notification.
 * Compiles to an empty function unless CONFIG_IWLWIFI_DEBUG is set. */
static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);

	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
  310. static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
  311. struct iwl_rx_mem_buffer *rxb)
  312. {
  313. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  314. u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
  315. IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
  316. "notification for %s:\n", len,
  317. get_cmd_string(pkt->hdr.cmd));
  318. iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
  319. }
/*
 * iwl_rx_beacon_notif - handle the uCode's beacon-transmit notification
 *
 * Records whether this device is acting as IBSS manager and, unless the
 * driver is shutting down, schedules the beacon-update work.
 */
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
	u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
		"tsf:0x%.8x%.8x rate:%d\n",
		status & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif
	/* Cached for the IBSS code paths elsewhere in the driver. */
	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
		queue_work(priv->workqueue, &priv->beacon_update);
}
  340. /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
  341. #define ACK_CNT_RATIO (50)
  342. #define BA_TIMEOUT_CNT (5)
  343. #define BA_TIMEOUT_MAX (16)
/**
 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
 *
 * When the ACK count ratio is low and aggregated BA timeout retries exceeding
 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
 * operation state.
 *
 * Returns false (unhealthy) only when the ACK ratio since the previous
 * statistics snapshot is below ACK_CNT_RATIO percent AND the BA timeout
 * delta reaches BA_TIMEOUT_MAX; otherwise true.  Skipped entirely while
 * aggregation sessions are active.
 */
static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
{
	int actual_delta, expected_delta, ba_timeout_delta;
	struct statistics_tx *cur, *old;

	/* Aggregation in progress skews ACK accounting; don't judge. */
	if (priv->_agn.agg_tids_count)
		return true;

	/* Pick the matching TX-statistics block (BT vs normal uCode). */
	if (iwl_bt_statistics(priv)) {
		cur = &pkt->u.stats_bt.tx;
		old = &priv->_agn.statistics_bt.tx;
	} else {
		cur = &pkt->u.stats.tx;
		old = &priv->_agn.statistics.tx;
	}

	/* Deltas relative to the previously stored snapshot. */
	actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
		       le32_to_cpu(old->actual_ack_cnt);
	expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
			 le32_to_cpu(old->expected_ack_cnt);

	/* Values should not be negative, but we do not trust the firmware */
	if (actual_delta <= 0 || expected_delta <= 0)
		return true;

	ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
			   le32_to_cpu(old->agg.ba_timeout);

	if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
	    ba_timeout_delta > BA_TIMEOUT_CNT) {
		IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n",
				actual_delta, expected_delta, ba_timeout_delta);

#ifdef CONFIG_IWLWIFI_DEBUGFS
		/*
		 * This is ifdef'ed on DEBUGFS because otherwise the
		 * statistics aren't available. If DEBUGFS is set but
		 * DEBUG is not, these will just compile out.
		 */
		IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
				priv->_agn.delta_statistics.tx.rx_detected_cnt);
		IWL_DEBUG_RADIO(priv,
				"ack_or_ba_timeout_collision delta %d\n",
				priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision);
#endif

		/* Low ratio alone is tolerated; combined with too many BA
		 * timeouts it is treated as a firmware problem. */
		if (ba_timeout_delta >= BA_TIMEOUT_MAX)
			return false;
	}
	return true;
}
  394. /**
  395. * iwl_good_plcp_health - checks for plcp error.
  396. *
  397. * When the plcp error is exceeding the thresholds, reset the radio
  398. * to improve the throughput.
  399. */
  400. static bool iwl_good_plcp_health(struct iwl_priv *priv,
  401. struct iwl_rx_packet *pkt, unsigned int msecs)
  402. {
  403. int delta;
  404. int threshold = priv->cfg->base_params->plcp_delta_threshold;
  405. if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
  406. IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
  407. return true;
  408. }
  409. if (iwl_bt_statistics(priv)) {
  410. struct statistics_rx_bt *cur, *old;
  411. cur = &pkt->u.stats_bt.rx;
  412. old = &priv->_agn.statistics_bt.rx;
  413. delta = le32_to_cpu(cur->ofdm.plcp_err) -
  414. le32_to_cpu(old->ofdm.plcp_err) +
  415. le32_to_cpu(cur->ofdm_ht.plcp_err) -
  416. le32_to_cpu(old->ofdm_ht.plcp_err);
  417. } else {
  418. struct statistics_rx *cur, *old;
  419. cur = &pkt->u.stats.rx;
  420. old = &priv->_agn.statistics.rx;
  421. delta = le32_to_cpu(cur->ofdm.plcp_err) -
  422. le32_to_cpu(old->ofdm.plcp_err) +
  423. le32_to_cpu(cur->ofdm_ht.plcp_err) -
  424. le32_to_cpu(old->ofdm_ht.plcp_err);
  425. }
  426. /* Can be negative if firmware reseted statistics */
  427. if (delta <= 0)
  428. return true;
  429. if ((delta * 100 / msecs) > threshold) {
  430. IWL_DEBUG_RADIO(priv,
  431. "plcp health threshold %u delta %d msecs %u\n",
  432. threshold, delta, msecs);
  433. return false;
  434. }
  435. return true;
  436. }
/*
 * iwl_recover_from_statistics - run health checks on each statistics packet
 *
 * When associated and at least ~100ms of data has accumulated, checks ACK
 * and PLCP health and forces a firmware or RF reset if they fail.  On every
 * path except the "not enough data yet" early return, the new statistics
 * snapshot is stored and the timestamp updated.
 */
static void iwl_recover_from_statistics(struct iwl_priv *priv,
					struct iwl_rx_packet *pkt)
{
	const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
	unsigned int msecs;
	unsigned long stamp;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	stamp = jiffies;
	msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);

	/* Only gather statistics and update time stamp when not associated */
	if (!iwl_is_any_associated(priv))
		goto out;

	/* Do not check/recover when do not have enough statistics data
	 * (note: returns WITHOUT updating the snapshot, so the window
	 * keeps growing until it spans at least ~100ms) */
	if (msecs < 99)
		return;

	if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) {
		IWL_ERR(priv, "low ack count detected, restart firmware\n");
		/* A successful forced reset (returns 0) restarts the
		 * firmware; skip storing stale statistics in that case. */
		if (!iwl_force_reset(priv, IWL_FW_RESET, false))
			return;
	}

	if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt, msecs))
		iwl_force_reset(priv, IWL_RF_RESET, false);
out:
	/* Store the new snapshot the next health check will diff against. */
	if (iwl_bt_statistics(priv))
		memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
		       sizeof(priv->_agn.statistics_bt));
	else
		memcpy(&priv->_agn.statistics, &pkt->u.stats,
		       sizeof(priv->_agn.statistics));
	priv->rx_statistics_jiffies = stamp;
}
  469. /* Calculate noise level, based on measurements during network silence just
  470. * before arriving beacon. This measurement can be done only if we know
  471. * exactly when to expect beacons, therefore only when we're associated. */
  472. static void iwl_rx_calc_noise(struct iwl_priv *priv)
  473. {
  474. struct statistics_rx_non_phy *rx_info;
  475. int num_active_rx = 0;
  476. int total_silence = 0;
  477. int bcn_silence_a, bcn_silence_b, bcn_silence_c;
  478. int last_rx_noise;
  479. if (iwl_bt_statistics(priv))
  480. rx_info = &(priv->_agn.statistics_bt.rx.general.common);
  481. else
  482. rx_info = &(priv->_agn.statistics.rx.general);
  483. bcn_silence_a =
  484. le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
  485. bcn_silence_b =
  486. le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
  487. bcn_silence_c =
  488. le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
  489. if (bcn_silence_a) {
  490. total_silence += bcn_silence_a;
  491. num_active_rx++;
  492. }
  493. if (bcn_silence_b) {
  494. total_silence += bcn_silence_b;
  495. num_active_rx++;
  496. }
  497. if (bcn_silence_c) {
  498. total_silence += bcn_silence_c;
  499. num_active_rx++;
  500. }
  501. /* Average among active antennas */
  502. if (num_active_rx)
  503. last_rx_noise = (total_silence / num_active_rx) - 107;
  504. else
  505. last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
  506. IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
  507. bcn_silence_a, bcn_silence_b, bcn_silence_c,
  508. last_rx_noise);
  509. }
/*
 * based on the assumption of all statistics counter are in DWORD
 * FIXME: This function is for debugging, do not deal with
 * the case of counters roll-over.
 *
 * Walks the new statistics packet and the stored previous snapshot as flat
 * __le32 arrays (skipping the leading flag word), updating per-counter
 * delta, accumulated and max-delta debugfs tables.  Counters that went
 * backwards (firmware reset) are simply skipped.  Compiles to a no-op
 * without CONFIG_IWLWIFI_DEBUGFS.
 */
static void iwl_accumulative_statistics(struct iwl_priv *priv,
					__le32 *stats)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct statistics_general_common *general, *accum_general;
	struct statistics_tx *tx, *accum_tx;

	/* Select the matching set of snapshot/accumulator structures;
	 * they are traversed below as raw word arrays, so the BT and
	 * non-BT variants must each be internally layout-consistent. */
	if (iwl_bt_statistics(priv)) {
		prev_stats = (__le32 *)&priv->_agn.statistics_bt;
		accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
		size = sizeof(struct iwl_bt_notif_statistics);
		general = &priv->_agn.statistics_bt.general.common;
		accum_general = &priv->_agn.accum_statistics_bt.general.common;
		tx = &priv->_agn.statistics_bt.tx;
		accum_tx = &priv->_agn.accum_statistics_bt.tx;
		delta = (u32 *)&priv->_agn.delta_statistics_bt;
		max_delta = (u32 *)&priv->_agn.max_delta_bt;
	} else {
		prev_stats = (__le32 *)&priv->_agn.statistics;
		accum_stats = (u32 *)&priv->_agn.accum_statistics;
		size = sizeof(struct iwl_notif_statistics);
		general = &priv->_agn.statistics.general.common;
		accum_general = &priv->_agn.accum_statistics.general.common;
		tx = &priv->_agn.statistics.tx;
		accum_tx = &priv->_agn.accum_statistics.tx;
		delta = (u32 *)&priv->_agn.delta_statistics;
		max_delta = (u32 *)&priv->_agn.max_delta;
	}

	/* Start at sizeof(__le32) to skip the leading flag word. */
	for (i = sizeof(__le32); i < size;
	     i += sizeof(__le32), stats++, prev_stats++, delta++,
	     max_delta++, accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta = (le32_to_cpu(*stats) -
				le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative statistics for "no-counter" type statistics */
	accum_general->temperature = general->temperature;
	accum_general->temperature_m = general->temperature_m;
	accum_general->ttl_timestamp = general->ttl_timestamp;
	accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
	accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
	accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
#endif
}
/*
 * iwl_rx_statistics - handle a periodic statistics notification
 *
 * Detects temperature/HT40 changes versus the stored snapshot, feeds the
 * debugfs accumulators, runs the recovery health checks, re-arms the
 * periodic statistics timer, and kicks off noise calculation and run-time
 * calibration when appropriate.
 */
static void iwl_rx_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	/* Re-arm interval (seconds) for the fallback statistics timer. */
	const int reg_recalib_period = 60;
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (iwl_bt_statistics(priv)) {
		IWL_DEBUG_RX(priv,
			     "Statistics notification received (%d vs %d).\n",
			     (int)sizeof(struct iwl_bt_notif_statistics),
			     le32_to_cpu(pkt->len_n_flags) &
			     FH_RSCSR_FRAME_SIZE_MSK);

		/* Temperature or HT40 mode changed since the last packet?
		 * (Must be computed before the snapshot is overwritten.) */
		change = ((priv->_agn.statistics_bt.general.common.temperature !=
			   pkt->u.stats_bt.general.common.temperature) ||
			  ((priv->_agn.statistics_bt.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
			   (pkt->u.stats_bt.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK)));

		iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
	} else {
		IWL_DEBUG_RX(priv,
			     "Statistics notification received (%d vs %d).\n",
			     (int)sizeof(struct iwl_notif_statistics),
			     le32_to_cpu(pkt->len_n_flags) &
			     FH_RSCSR_FRAME_SIZE_MSK);

		change = ((priv->_agn.statistics.general.common.temperature !=
			   pkt->u.stats.general.common.temperature) ||
			  ((priv->_agn.statistics.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
			   (pkt->u.stats.flag &
			    STATISTICS_REPLY_FLG_HT40_MODE_MSK)));

		iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
	}

	/* Health checks; also stores the new snapshot. */
	iwl_recover_from_statistics(priv, pkt);

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * reg_recalib_period seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(reg_recalib_period * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
}
  615. static void iwl_rx_reply_statistics(struct iwl_priv *priv,
  616. struct iwl_rx_mem_buffer *rxb)
  617. {
  618. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  619. if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
  620. #ifdef CONFIG_IWLWIFI_DEBUGFS
  621. memset(&priv->_agn.accum_statistics, 0,
  622. sizeof(struct iwl_notif_statistics));
  623. memset(&priv->_agn.delta_statistics, 0,
  624. sizeof(struct iwl_notif_statistics));
  625. memset(&priv->_agn.max_delta, 0,
  626. sizeof(struct iwl_notif_statistics));
  627. memset(&priv->_agn.accum_statistics_bt, 0,
  628. sizeof(struct iwl_bt_notif_statistics));
  629. memset(&priv->_agn.delta_statistics_bt, 0,
  630. sizeof(struct iwl_bt_notif_statistics));
  631. memset(&priv->_agn.max_delta_bt, 0,
  632. sizeof(struct iwl_bt_notif_statistics));
  633. #endif
  634. IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
  635. }
  636. iwl_rx_statistics(priv, rxb);
  637. }
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of the status bits BEFORE this notification is applied,
	 * used below to detect an HW-rfkill state transition. */
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* Block further commands while the card is disabled. */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			/* RXON still allowed: lift the command block again. */
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
		if (flags & CT_CARD_DISABLED)
			iwl_tt_enter_ct_kill(priv);
	}
	if (!(flags & CT_CARD_DISABLED))
		iwl_tt_exit_ct_kill(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	/* If the HW rfkill bit changed, tell mac80211; otherwise wake any
	 * command waiter, since the card state may have unblocked it. */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up_interruptible(&priv->wait_command_queue);
}
  681. static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
  682. struct iwl_rx_mem_buffer *rxb)
  683. {
  684. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  685. struct iwl_missed_beacon_notif *missed_beacon;
  686. missed_beacon = &pkt->u.missed_beacon;
  687. if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
  688. priv->missed_beacon_threshold) {
  689. IWL_DEBUG_CALIB(priv,
  690. "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
  691. le32_to_cpu(missed_beacon->consecutive_missed_beacons),
  692. le32_to_cpu(missed_beacon->total_missed_becons),
  693. le32_to_cpu(missed_beacon->num_recvd_beacons),
  694. le32_to_cpu(missed_beacon->num_expected_beacons));
  695. if (!test_bit(STATUS_SCANNING, &priv->status))
  696. iwl_init_sensitivity(priv);
  697. }
  698. }
  699. /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
  700. * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
  701. static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
  702. struct iwl_rx_mem_buffer *rxb)
  703. {
  704. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  705. priv->_agn.last_phy_res_valid = true;
  706. memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
  707. sizeof(struct iwl_rx_phy_res));
  708. }
/*
 * returns non-zero if packet should be dropped
 *
 * Inspects the uCode decryption result for a protected frame and either
 * marks the skb status as hardware-decrypted (RX_FLAG_DECRYPTED) or asks
 * the caller to drop the frame (-1) when in-place decryption destroyed it.
 * Returns 0 (keep the frame) for unprotected frames, when HW decryption is
 * disabled via the RXON filter, and for recoverable failures that SW can
 * retry (e.g. a bad TKIP phase-1 key).
 */
static int iwl_set_decrypted_flag(struct iwl_priv *priv,
				  struct ieee80211_hdr *hdr,
				  u32 decrypt_res,
				  struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
	    RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* Unprotected frames need no decrypt bookkeeping. */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through - TKIP with good TTAK is checked like WEP */

	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through - check for a successful HW decrypt */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
  755. static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
  756. struct ieee80211_hdr *hdr,
  757. u16 len,
  758. u32 ampdu_status,
  759. struct iwl_rx_mem_buffer *rxb,
  760. struct ieee80211_rx_status *stats)
  761. {
  762. struct sk_buff *skb;
  763. __le16 fc = hdr->frame_control;
  764. /* We only process data packets if the interface is open */
  765. if (unlikely(!priv->is_open)) {
  766. IWL_DEBUG_DROP_LIMIT(priv,
  767. "Dropping packet while interface is not open.\n");
  768. return;
  769. }
  770. /* In case of HW accelerated crypto and bad decryption, drop */
  771. if (!priv->cfg->mod_params->sw_crypto &&
  772. iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
  773. return;
  774. skb = dev_alloc_skb(128);
  775. if (!skb) {
  776. IWL_ERR(priv, "dev_alloc_skb failed\n");
  777. return;
  778. }
  779. skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
  780. iwl_update_stats(priv, false, fc, len);
  781. memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
  782. ieee80211_rx(priv->hw, skb);
  783. rxb->page = NULL;
  784. }
  785. static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
  786. {
  787. u32 decrypt_out = 0;
  788. if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
  789. RX_RES_STATUS_STATION_FOUND)
  790. decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
  791. RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
  792. decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
  793. /* packet was not encrypted */
  794. if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
  795. RX_RES_STATUS_SEC_TYPE_NONE)
  796. return decrypt_out;
  797. /* packet was encrypted with unknown alg */
  798. if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
  799. RX_RES_STATUS_SEC_TYPE_ERR)
  800. return decrypt_out;
  801. /* decryption was not done in HW */
  802. if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
  803. RX_MPDU_RES_STATUS_DEC_DONE_MSK)
  804. return decrypt_out;
  805. switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
  806. case RX_RES_STATUS_SEC_TYPE_CCMP:
  807. /* alg is CCM: check MIC only */
  808. if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
  809. /* Bad MIC */
  810. decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
  811. else
  812. decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
  813. break;
  814. case RX_RES_STATUS_SEC_TYPE_TKIP:
  815. if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
  816. /* Bad TTAK */
  817. decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
  818. break;
  819. }
  820. /* fall through if TTAK OK */
  821. default:
  822. if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
  823. decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
  824. else
  825. decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
  826. break;
  827. }
  828. IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
  829. decrypt_in, decrypt_out);
  830. return decrypt_out;
  831. }
  832. /* Called for REPLY_RX (legacy ABG frames), or
  833. * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
  834. static void iwl_rx_reply_rx(struct iwl_priv *priv,
  835. struct iwl_rx_mem_buffer *rxb)
  836. {
  837. struct ieee80211_hdr *header;
  838. struct ieee80211_rx_status rx_status;
  839. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  840. struct iwl_rx_phy_res *phy_res;
  841. __le32 rx_pkt_status;
  842. struct iwl_rx_mpdu_res_start *amsdu;
  843. u32 len;
  844. u32 ampdu_status;
  845. u32 rate_n_flags;
  846. /**
  847. * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
  848. * REPLY_RX: physical layer info is in this buffer
  849. * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
  850. * command and cached in priv->last_phy_res
  851. *
  852. * Here we set up local variables depending on which command is
  853. * received.
  854. */
  855. if (pkt->hdr.cmd == REPLY_RX) {
  856. phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
  857. header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
  858. + phy_res->cfg_phy_cnt);
  859. len = le16_to_cpu(phy_res->byte_count);
  860. rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
  861. phy_res->cfg_phy_cnt + len);
  862. ampdu_status = le32_to_cpu(rx_pkt_status);
  863. } else {
  864. if (!priv->_agn.last_phy_res_valid) {
  865. IWL_ERR(priv, "MPDU frame without cached PHY data\n");
  866. return;
  867. }
  868. phy_res = &priv->_agn.last_phy_res;
  869. amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
  870. header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
  871. len = le16_to_cpu(amsdu->byte_count);
  872. rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
  873. ampdu_status = iwl_translate_rx_status(priv,
  874. le32_to_cpu(rx_pkt_status));
  875. }
  876. if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
  877. IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
  878. phy_res->cfg_phy_cnt);
  879. return;
  880. }
  881. if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
  882. !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
  883. IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
  884. le32_to_cpu(rx_pkt_status));
  885. return;
  886. }
  887. /* This will be used in several places later */
  888. rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
  889. /* rx_status carries information about the packet to mac80211 */
  890. rx_status.mactime = le64_to_cpu(phy_res->timestamp);
  891. rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
  892. IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
  893. rx_status.freq =
  894. ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
  895. rx_status.band);
  896. rx_status.rate_idx =
  897. iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
  898. rx_status.flag = 0;
  899. /* TSF isn't reliable. In order to allow smooth user experience,
  900. * this W/A doesn't propagate it to the mac80211 */
  901. /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
  902. priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
  903. /* Find max signal strength (dBm) among 3 antenna/receiver chains */
  904. rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
  905. iwl_dbg_log_rx_data_frame(priv, len, header);
  906. IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
  907. rx_status.signal, (unsigned long long)rx_status.mactime);
  908. /*
  909. * "antenna number"
  910. *
  911. * It seems that the antenna field in the phy flags value
  912. * is actually a bit field. This is undefined by radiotap,
  913. * it wants an actual antenna number but I always get "7"
  914. * for most legacy frames I receive indicating that the
  915. * same frame was received on all three RX chains.
  916. *
  917. * I think this field should be removed in favor of a
  918. * new 802.11n radiotap field "RX chains" that is defined
  919. * as a bitmask.
  920. */
  921. rx_status.antenna =
  922. (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
  923. >> RX_RES_PHY_FLAGS_ANTENNA_POS;
  924. /* set the preamble flag if appropriate */
  925. if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
  926. rx_status.flag |= RX_FLAG_SHORTPRE;
  927. /* Set up the HT phy flags */
  928. if (rate_n_flags & RATE_MCS_HT_MSK)
  929. rx_status.flag |= RX_FLAG_HT;
  930. if (rate_n_flags & RATE_MCS_HT40_MSK)
  931. rx_status.flag |= RX_FLAG_40MHZ;
  932. if (rate_n_flags & RATE_MCS_SGI_MSK)
  933. rx_status.flag |= RX_FLAG_SHORT_GI;
  934. iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
  935. rxb, &rx_status);
  936. }
  937. /**
  938. * iwl_setup_rx_handlers - Initialize Rx handler callbacks
  939. *
  940. * Setup the RX handlers for each of the reply types sent from the uCode
  941. * to the host.
  942. */
  943. void iwl_setup_rx_handlers(struct iwl_priv *priv)
  944. {
  945. void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
  946. handlers = priv->rx_handlers;
  947. handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
  948. handlers[REPLY_ERROR] = iwl_rx_reply_error;
  949. handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
  950. handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
  951. handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
  952. handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
  953. handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
  954. /*
  955. * The same handler is used for both the REPLY to a discrete
  956. * statistics request from the host as well as for the periodic
  957. * statistics notifications (after received beacons) from the uCode.
  958. */
  959. handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
  960. handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
  961. iwl_setup_rx_scan_handlers(priv);
  962. handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
  963. handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
  964. /* Rx handlers */
  965. handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
  966. handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
  967. /* block ack */
  968. handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
  969. /* Set up hardware specific Rx handlers */
  970. priv->cfg->ops->lib->rx_handler_setup(priv);
  971. }