ops.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/module.h>
#include <net/mac80211.h>

#include "iwl-notif-wait.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
#include "fw-api-scan.h"
#include "time-event.h"

/*
 * module name, copyright, version, etc.
 */
#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
#define DRV_VERSION	IWLWIFI_VERSION

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

static const struct iwl_op_mode_ops iwl_mvm_ops;

struct iwl_mvm_mod_params iwlmvm_mod_params = {
	.power_scheme = IWL_POWER_SCHEME_BPS,
	/* rest of fields are 0 by default */
};

module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
MODULE_PARM_DESC(init_dbg,
		 "set to true to debug an ASSERT in INIT fw (default: false)");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
MODULE_PARM_DESC(power_scheme,
		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");

/*
 * module init and exit functions
 */
static int __init iwl_mvm_init(void)
{
	int ret;

	ret = iwl_mvm_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
	if (ret) {
		pr_err("Unable to register MVM op_mode: %d\n", ret);
		iwl_mvm_rate_control_unregister();
	}

	return ret;
}
module_init(iwl_mvm_init);

static void __exit iwl_mvm_exit(void)
{
	iwl_opmode_deregister("iwlmvm");
	iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);

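/*
 * Program the SKU control and radio configuration bits (taken from the
 * HW revision and, when available, the parsed NVM data) into
 * CSR_HW_IF_CONFIG_REG, and apply the early PCIe power-off workaround.
 */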
static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	u32 reg_val = 0;

	/*
	 * We can't upload the correct value to the INIT image
	 * as we don't have nvm_data by that time.
	 *
	 * TODO: Figure out what we should do here
	 */
	if (mvm->nvm_data) {
		radio_cfg_type = mvm->nvm_data->radio_cfg_type;
		radio_cfg_step = mvm->nvm_data->radio_cfg_step;
		radio_cfg_dash = mvm->nvm_data->radio_cfg_dash;
	} else {
		radio_cfg_type = 0;
		radio_cfg_step = 0;
		radio_cfg_dash = 0;
	}

	/* SKU control */
	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
				CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
		~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

	/* silicon bits */
	reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
	reg_val |= CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;

	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
				reg_val);

	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
		       radio_cfg_step, radio_cfg_dash);

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not be able to obtain it back.
	 */
	iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
			       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}

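/*
 * Entry mapping a firmware notification (cmd_id) to the handler that
 * processes it; async selects whether it runs in the Rx path or from
 * the async handlers worker.
 */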
struct iwl_rx_handlers {
	u8 cmd_id;
	bool async;
	int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
		  struct iwl_device_cmd *cmd);
};

#define RX_HANDLER(_cmd_id, _fn, _async)	\
	{ .cmd_id = _cmd_id, .fn = _fn, .async = _async }

/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME)
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can be SYNC - this means that it will be called in the Rx path
 * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and
 * only in this case!), it should be set as ASYNC. In that case, it will be
 * called from a worker with mvm->mutex held.
 */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
	RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
	RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
	RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
	RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
	RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
};
#undef RX_HANDLER

#define CMD(x) [x] = #x
static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
	CMD(MVM_ALIVE),
	CMD(REPLY_ERROR),
	CMD(INIT_COMPLETE_NOTIF),
	CMD(PHY_CONTEXT_CMD),
	CMD(MGMT_MCAST_KEY),
	CMD(TX_CMD),
	CMD(TXPATH_FLUSH),
	CMD(MAC_CONTEXT_CMD),
	CMD(TIME_EVENT_CMD),
	CMD(TIME_EVENT_NOTIFICATION),
	CMD(BINDING_CONTEXT_CMD),
	CMD(TIME_QUOTA_CMD),
	CMD(RADIO_VERSION_NOTIFICATION),
	CMD(SCAN_REQUEST_CMD),
	CMD(SCAN_ABORT_CMD),
	CMD(SCAN_START_NOTIFICATION),
	CMD(SCAN_RESULTS_NOTIFICATION),
	CMD(SCAN_COMPLETE_NOTIFICATION),
	CMD(NVM_ACCESS_CMD),
	CMD(PHY_CONFIGURATION_CMD),
	CMD(CALIB_RES_NOTIF_PHY_DB),
	CMD(SET_CALIB_DEFAULT_CMD),
	CMD(CALIBRATION_COMPLETE_NOTIFICATION),
	CMD(ADD_STA),
	CMD(REMOVE_STA),
	CMD(LQ_CMD),
	CMD(SCAN_OFFLOAD_CONFIG_CMD),
	CMD(SCAN_OFFLOAD_REQUEST_CMD),
	CMD(SCAN_OFFLOAD_ABORT_CMD),
	CMD(SCAN_OFFLOAD_COMPLETE),
	CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
	CMD(POWER_TABLE_CMD),
	CMD(WEP_KEY),
	CMD(REPLY_RX_PHY_CMD),
	CMD(REPLY_RX_MPDU_CMD),
	CMD(BEACON_TEMPLATE_CMD),
	CMD(STATISTICS_NOTIFICATION),
	CMD(TX_ANT_CONFIGURATION_CMD),
	CMD(D3_CONFIG_CMD),
	CMD(PROT_OFFLOAD_CONFIG_CMD),
	CMD(OFFLOADS_QUERY_CMD),
	CMD(REMOTE_WAKE_CONFIG_CMD),
	CMD(WOWLAN_PATTERNS),
	CMD(WOWLAN_CONFIGURATION),
	CMD(WOWLAN_TSC_RSC_PARAM),
	CMD(WOWLAN_TKIP_PARAM),
	CMD(WOWLAN_KEK_KCK_MATERIAL),
	CMD(WOWLAN_GET_STATUSES),
	CMD(WOWLAN_TX_POWER_PER_DB),
	CMD(NET_DETECT_CONFIG_CMD),
	CMD(NET_DETECT_PROFILES_QUERY_CMD),
	CMD(NET_DETECT_PROFILES_CMD),
	CMD(NET_DETECT_HOTSPOTS_CMD),
	CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
};
#undef CMD

/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);

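/*
 * Allocate and initialize the op mode and MVM state, configure the
 * transport layer, run the INIT ucode and register with mac80211 and
 * debugfs.
 */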
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
	struct ieee80211_hw *hw;
	struct iwl_op_mode *op_mode;
	struct iwl_mvm *mvm;
	struct iwl_trans_config trans_cfg = {};
	static const u8 no_reclaim_cmds[] = {
		TX_CMD,
	};
	int err, scan_size;

	switch (cfg->device_family) {
	case IWL_DEVICE_FAMILY_6030:
	case IWL_DEVICE_FAMILY_6005:
	case IWL_DEVICE_FAMILY_7000:
		break;
	default:
		IWL_ERR(trans, "Trying to load mvm on an unsupported device\n");
		return NULL;
	}

	/********************************
	 * 1. Allocating and configuring HW data
	 ********************************/
	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
				sizeof(struct iwl_mvm),
				&iwl_mvm_hw_ops);
	if (!hw)
		return NULL;

	op_mode = hw->priv;
	op_mode->ops = &iwl_mvm_ops;
	op_mode->trans = trans;

	mvm = IWL_OP_MODE_GET_MVM(op_mode);
	mvm->dev = trans->dev;
	mvm->trans = trans;
	mvm->cfg = cfg;
	mvm->fw = fw;
	mvm->hw = hw;

	mutex_init(&mvm->mutex);
	spin_lock_init(&mvm->async_handlers_lock);
	INIT_LIST_HEAD(&mvm->time_event_list);
	INIT_LIST_HEAD(&mvm->async_handlers_list);
	spin_lock_init(&mvm->time_event_lock);

	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
	INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);

	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

	/*
	 * Populate the state variables that the transport layer needs
	 * to know about.
	 */
	trans_cfg.op_mode = op_mode;
	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
	trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;

	/* TODO: this should really be a TLV */
	if (cfg->device_family == IWL_DEVICE_FAMILY_7000)
		trans_cfg.bc_table_dword = true;

	if (!iwlwifi_mod_params.wd_disable)
		trans_cfg.queue_watchdog_timeout = cfg->base_params->wd_timeout;
	else
		trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;

	trans_cfg.command_names = iwl_mvm_cmd_strings;

	trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
	trans_cfg.cmd_fifo = IWL_MVM_CMD_FIFO;

	snprintf(mvm->hw->wiphy->fw_version,
		 sizeof(mvm->hw->wiphy->fw_version),
		 "%s", fw->fw_version);

	/* Configure transport layer */
	iwl_trans_configure(mvm->trans, &trans_cfg);

	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
	trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);

	/* set up notification wait support */
	iwl_notification_wait_init(&mvm->notif_wait);

	/* Init phy db */
	mvm->phy_db = iwl_phy_db_init(trans);
	if (!mvm->phy_db) {
		IWL_ERR(mvm, "Cannot init phy_db\n");
		goto out_free;
	}

	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
		 mvm->cfg->name, mvm->trans->hw_rev);

	err = iwl_trans_start_hw(mvm->trans);
	if (err)
		goto out_free;

	mutex_lock(&mvm->mutex);
	err = iwl_run_init_mvm_ucode(mvm, true);
	mutex_unlock(&mvm->mutex);
	if (err && !iwlmvm_mod_params.init_dbg) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
		goto out_free;
	}

	/* Stop the hw after the ALIVE and NVM has been read */
	if (!iwlmvm_mod_params.init_dbg)
		iwl_trans_stop_hw(mvm->trans, false);

	scan_size = sizeof(struct iwl_scan_cmd) +
		mvm->fw->ucode_capa.max_probe_length +
		(MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel));
	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
	if (!mvm->scan_cmd)
		goto out_free;

	err = iwl_mvm_mac_setup_register(mvm);
	if (err)
		goto out_free;

	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
	if (err)
		goto out_unregister;

	return op_mode;

 out_unregister:
	ieee80211_unregister_hw(mvm->hw);
 out_free:
	iwl_phy_db_free(mvm->phy_db);
	kfree(mvm->scan_cmd);
	kfree(mvm->eeprom_blob);
	iwl_trans_stop_hw(trans, true);
	ieee80211_free_hw(mvm->hw);
	return NULL;
}

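/*
 * Undo iwl_op_mode_mvm_start(): unregister from mac80211, stop the
 * transport and free all MVM-owned data.
 */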
static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int i;

	iwl_mvm_leds_exit(mvm);

	ieee80211_unregister_hw(mvm->hw);

	kfree(mvm->scan_cmd);

	iwl_trans_stop_hw(mvm->trans, true);

	iwl_phy_db_free(mvm->phy_db);
	mvm->phy_db = NULL;

	kfree(mvm->eeprom_blob);
	iwl_free_nvm_data(mvm->nvm_data);
	for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
		kfree(mvm->nvm_sections[i].data);

	ieee80211_free_hw(mvm->hw);
}

struct iwl_async_handler_entry {
	struct list_head list;
	struct iwl_rx_cmd_buffer rxb;
	int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
		  struct iwl_device_cmd *cmd);
};

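/* Drop all pending async notifications without running their handlers. */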
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
{
	struct iwl_async_handler_entry *entry, *tmp;

	spin_lock_bh(&mvm->async_handlers_lock);
	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&mvm->async_handlers_lock);
}

static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm =
		container_of(wk, struct iwl_mvm, async_handlers_wk);
	struct iwl_async_handler_entry *entry, *tmp;
	struct list_head local_list;

	INIT_LIST_HEAD(&local_list);

	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
	mutex_lock(&mvm->mutex);

	/*
	 * Sync with Rx path with a lock. Remove all the entries from this
	 * list, add them to a local one (lock free), and then handle them.
	 */
	spin_lock_bh(&mvm->async_handlers_lock);
	list_splice_init(&mvm->async_handlers_list, &local_list);
	spin_unlock_bh(&mvm->async_handlers_lock);

	list_for_each_entry_safe(entry, tmp, &local_list, list) {
		if (entry->fn(mvm, &entry->rxb, NULL))
			IWL_WARN(mvm,
				 "returned value from ASYNC handlers are ignored\n");
		iwl_free_rxb(&entry->rxb);
		list_del(&entry->list);
		kfree(entry);
	}

	mutex_unlock(&mvm->mutex);
}

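/*
 * Dispatch a firmware notification: SYNC handlers are called directly
 * in the Rx path, while ASYNC handlers are queued on async_handlers_list
 * and run later from iwl_mvm_async_handlers_wk() with mvm->mutex held.
 */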
static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
			       struct iwl_rx_cmd_buffer *rxb,
			       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	u8 i;

	/*
	 * Do the notification wait before RX handlers so
	 * even if the RX handler consumes the RXB we have
	 * access to it in the notification wait entry.
	 */
	iwl_notification_wait_notify(&mvm->notif_wait, pkt);

	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
		struct iwl_async_handler_entry *entry;

		if (rx_h->cmd_id != pkt->hdr.cmd)
			continue;

		if (!rx_h->async)
			return rx_h->fn(mvm, rxb, cmd);

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		/* we can't do much... */
		if (!entry)
			return 0;

		entry->rxb._page = rxb_steal_page(rxb);
		entry->rxb._offset = rxb->_offset;
		entry->rxb._rx_page_order = rxb->_rx_page_order;
		entry->fn = rx_h->fn;
		spin_lock(&mvm->async_handlers_lock);
		list_add_tail(&entry->list, &mvm->async_handlers_list);
		spin_unlock(&mvm->async_handlers_lock);
		schedule_work(&mvm->async_handlers_wk);
		break;
	}

	return 0;
}

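/*
 * Stop the mac80211 queue backing a transport queue. The stop is
 * reference counted per mac80211 queue, so the queue is woken again only
 * when the count drops back to zero (see iwl_mvm_wake_sw_queue() below).
 */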
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int mq = mvm->queue_to_mac80211[queue];

	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
		return;

	if (atomic_inc_return(&mvm->queue_stop_count[mq]) > 1) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "queue %d (mac80211 %d) already stopped\n",
				    queue, mq);
		return;
	}

	set_bit(mq, &mvm->transport_queue_stop);
	ieee80211_stop_queue(mvm->hw, mq);
}

static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	int mq = mvm->queue_to_mac80211[queue];

	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
		return;

	if (atomic_dec_return(&mvm->queue_stop_count[mq]) > 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "queue %d (mac80211 %d) already awake\n",
				    queue, mq);
		return;
	}

	clear_bit(mq, &mvm->transport_queue_stop);
	ieee80211_wake_queue(mvm->hw, mq);
}

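/* Record the new HW rfkill state and report it via wiphy_rfkill_set_hw_state(). */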
static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	if (state)
		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
	else
		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);

	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
}

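/* Free an skb the transport could not transmit, along with its TX command. */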
static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
	ieee80211_free_txskb(mvm->hw, skb);
}

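/*
 * Try to recover from a firmware error by restarting the hw through
 * mac80211, unless a restart is already in progress or restart is not
 * allowed for the currently running ucode.
 */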
static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
{
	iwl_abort_notification_waits(&mvm->notif_wait);

	/*
	 * If we're restarting already, don't cycle restarts.
	 * If INIT fw asserted, it will likely fail again.
	 * If WoWLAN fw asserted, don't restart either, mac80211
	 * can't recover this since we're already half suspended.
	 */
	if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n");
	} else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
		   iwlwifi_mod_params.restart_fw) {
		/*
		 * This is a bit racy, but worst case we tell mac80211 about
		 * a stopped/aborted (sched) scan when that was already done
		 * which is not a problem. It is necessary to abort any scan
		 * here because mac80211 requires having the scan cleared
		 * before restarting.
		 * We'll reset the scan_status to NONE in restart cleanup in
		 * the next start() call from mac80211.
		 */
		switch (mvm->scan_status) {
		case IWL_MVM_SCAN_NONE:
			break;
		case IWL_MVM_SCAN_OS:
			ieee80211_scan_completed(mvm->hw, true);
			break;
		}

		ieee80211_restart_hw(mvm->hw);
	}
}

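/* Firmware error: dump the error log and trigger a restart. */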
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_dump_nic_error_log(mvm);

	iwl_mvm_nic_restart(mvm);
}

static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	WARN_ON(1);
	iwl_mvm_nic_restart(mvm);
}

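/* Op mode callbacks invoked by the iwlwifi transport layer. */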
static const struct iwl_op_mode_ops iwl_mvm_ops = {
	.start = iwl_op_mode_mvm_start,
	.stop = iwl_op_mode_mvm_stop,
	.rx = iwl_mvm_rx_dispatch,
	.queue_full = iwl_mvm_stop_sw_queue,
	.queue_not_full = iwl_mvm_wake_sw_queue,
	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,
	.free_skb = iwl_mvm_free_skb,
	.nic_error = iwl_mvm_nic_error,
	.cmd_queue_full = iwl_mvm_cmd_queue_full,
	.nic_config = iwl_mvm_nic_config,
};