ops.c

/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/module.h>
#include <net/mac80211.h>

#include "iwl-notif-wait.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "rs.h"
#include "fw-api-scan.h"
#include "time-event.h"

/*
 * module name, copyright, version, etc.
 */
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
#define DRV_VERSION     IWLWIFI_VERSION

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

static const struct iwl_op_mode_ops iwl_mvm_ops;

struct iwl_mvm_mod_params iwlmvm_mod_params = {
        .power_scheme = IWL_POWER_SCHEME_BPS,
        /* rest of fields are 0 by default */
};
module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, S_IRUGO);
MODULE_PARM_DESC(init_dbg,
                 "set to true to debug an ASSERT in INIT fw (default: false)");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, S_IRUGO);
MODULE_PARM_DESC(power_scheme,
                 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");

/*
 * module init and exit functions
 */
static int __init iwl_mvm_init(void)
{
        int ret;

        ret = iwl_mvm_rate_control_register();
        if (ret) {
                pr_err("Unable to register rate control algorithm: %d\n", ret);
                return ret;
        }

        ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
        if (ret) {
                pr_err("Unable to register MVM op_mode: %d\n", ret);
                iwl_mvm_rate_control_unregister();
        }

        return ret;
}
module_init(iwl_mvm_init);

static void __exit iwl_mvm_exit(void)
{
        iwl_opmode_deregister("iwlmvm");
        iwl_mvm_rate_control_unregister();
}
module_exit(iwl_mvm_exit);
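
/*
 * Program the HW interface configuration register: MAC step/dash, the radio
 * type/step/dash taken from the parsed NVM data (or 0 before the NVM is
 * available), and the silicon bits, then apply the early-PCIe-power-off
 * workaround described below.
 */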
static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
        u32 reg_val = 0;

        /*
         * We can't upload the correct value to the INIT image
         * as we don't have nvm_data by that time.
         *
         * TODO: Figure out what we should do here
         */
        if (mvm->nvm_data) {
                radio_cfg_type = mvm->nvm_data->radio_cfg_type;
                radio_cfg_step = mvm->nvm_data->radio_cfg_step;
                radio_cfg_dash = mvm->nvm_data->radio_cfg_dash;
        } else {
                radio_cfg_type = 0;
                radio_cfg_step = 0;
                radio_cfg_dash = 0;
        }

        /* SKU control */
        reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
                   CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
        reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
                   CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

        /* radio configuration */
        reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
        reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
        reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

        WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
                ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);

        /* silicon bits */
        reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
        reg_val |= CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;

        iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
                                CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
                                CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
                                CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
                                CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
                                CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
                                CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
                                CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
                                reg_val);

        IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
                       radio_cfg_step, radio_cfg_dash);

        /*
         * W/A : NIC is stuck in a reset state after Early PCIe power off
         * (PCIe power is lost before PERST# is asserted), causing ME FW
         * to lose ownership and not be able to obtain it back.
         */
        iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
                               APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
                               ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}

struct iwl_rx_handlers {
        u8 cmd_id;
        bool async;
        int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                  struct iwl_device_cmd *cmd);
};

#define RX_HANDLER(_cmd_id, _fn, _async)        \
        { .cmd_id = _cmd_id, .fn = _fn, .async = _async }

/*
 * Handlers for fw notifications
 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, async)
 * This list should be in order of frequency for performance purposes.
 *
 * The handler can be SYNC - this means that it will be called in the Rx path
 * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and
 * only in this case!), it should be set as ASYNC. In that case, it will be
 * called from a worker with mvm->mutex held.
 */
static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
        RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
        RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
        RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
        RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
        RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
        RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
        RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
        RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
        RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false),
};
#undef RX_HANDLER

#define CMD(x) [x] = #x

static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(MVM_ALIVE),
        CMD(REPLY_ERROR),
        CMD(INIT_COMPLETE_NOTIF),
        CMD(PHY_CONTEXT_CMD),
        CMD(MGMT_MCAST_KEY),
        CMD(TX_CMD),
        CMD(TXPATH_FLUSH),
        CMD(MAC_CONTEXT_CMD),
        CMD(TIME_EVENT_CMD),
        CMD(TIME_EVENT_NOTIFICATION),
        CMD(BINDING_CONTEXT_CMD),
        CMD(TIME_QUOTA_CMD),
        CMD(RADIO_VERSION_NOTIFICATION),
        CMD(SCAN_REQUEST_CMD),
        CMD(SCAN_ABORT_CMD),
        CMD(SCAN_START_NOTIFICATION),
        CMD(SCAN_RESULTS_NOTIFICATION),
        CMD(SCAN_COMPLETE_NOTIFICATION),
        CMD(NVM_ACCESS_CMD),
        CMD(PHY_CONFIGURATION_CMD),
        CMD(CALIB_RES_NOTIF_PHY_DB),
        CMD(SET_CALIB_DEFAULT_CMD),
        CMD(CALIBRATION_COMPLETE_NOTIFICATION),
        CMD(ADD_STA),
        CMD(REMOVE_STA),
        CMD(LQ_CMD),
        CMD(SCAN_OFFLOAD_CONFIG_CMD),
        CMD(SCAN_OFFLOAD_REQUEST_CMD),
        CMD(SCAN_OFFLOAD_ABORT_CMD),
        CMD(SCAN_OFFLOAD_COMPLETE),
        CMD(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
        CMD(POWER_TABLE_CMD),
        CMD(WEP_KEY),
        CMD(REPLY_RX_PHY_CMD),
        CMD(REPLY_RX_MPDU_CMD),
        CMD(BEACON_TEMPLATE_CMD),
        CMD(STATISTICS_NOTIFICATION),
        CMD(TX_ANT_CONFIGURATION_CMD),
        CMD(D3_CONFIG_CMD),
        CMD(PROT_OFFLOAD_CONFIG_CMD),
        CMD(OFFLOADS_QUERY_CMD),
        CMD(REMOTE_WAKE_CONFIG_CMD),
        CMD(WOWLAN_PATTERNS),
        CMD(WOWLAN_CONFIGURATION),
        CMD(WOWLAN_TSC_RSC_PARAM),
        CMD(WOWLAN_TKIP_PARAM),
        CMD(WOWLAN_KEK_KCK_MATERIAL),
        CMD(WOWLAN_GET_STATUSES),
        CMD(WOWLAN_TX_POWER_PER_DB),
        CMD(NET_DETECT_CONFIG_CMD),
        CMD(NET_DETECT_PROFILES_QUERY_CMD),
        CMD(NET_DETECT_PROFILES_CMD),
        CMD(NET_DETECT_HOTSPOTS_CMD),
        CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
        CMD(CARD_STATE_NOTIFICATION),
};
#undef CMD

/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
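
/*
 * Allocate and initialize the op_mode/mvm state, configure the transport,
 * run the INIT ucode to read the NVM, and register with mac80211 and debugfs.
 * Returns NULL on failure after undoing whatever had been set up.
 */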
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
        struct ieee80211_hw *hw;
        struct iwl_op_mode *op_mode;
        struct iwl_mvm *mvm;
        struct iwl_trans_config trans_cfg = {};
        static const u8 no_reclaim_cmds[] = {
                TX_CMD,
        };
        int err, scan_size;

        switch (cfg->device_family) {
        case IWL_DEVICE_FAMILY_6030:
        case IWL_DEVICE_FAMILY_6005:
        case IWL_DEVICE_FAMILY_7000:
                break;
        default:
                IWL_ERR(trans, "Trying to load mvm on an unsupported device\n");
                return NULL;
        }

        /********************************
         * 1. Allocating and configuring HW data
         ********************************/
        hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
                                sizeof(struct iwl_mvm),
                                &iwl_mvm_hw_ops);
        if (!hw)
                return NULL;

        op_mode = hw->priv;
        op_mode->ops = &iwl_mvm_ops;
        op_mode->trans = trans;

        mvm = IWL_OP_MODE_GET_MVM(op_mode);
        mvm->dev = trans->dev;
        mvm->trans = trans;
        mvm->cfg = cfg;
        mvm->fw = fw;
        mvm->hw = hw;

        mutex_init(&mvm->mutex);
        spin_lock_init(&mvm->async_handlers_lock);
        INIT_LIST_HEAD(&mvm->time_event_list);
        INIT_LIST_HEAD(&mvm->async_handlers_list);
        spin_lock_init(&mvm->time_event_lock);

        INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
        INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
        INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);

        SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);

        /*
         * Populate the state variables that the transport layer needs
         * to know about.
         */
        trans_cfg.op_mode = op_mode;
        trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
        trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
        trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;

        /* TODO: this should really be a TLV */
        if (cfg->device_family == IWL_DEVICE_FAMILY_7000)
                trans_cfg.bc_table_dword = true;

        if (!iwlwifi_mod_params.wd_disable)
                trans_cfg.queue_watchdog_timeout = cfg->base_params->wd_timeout;
        else
                trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;

        trans_cfg.command_names = iwl_mvm_cmd_strings;

        trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
        trans_cfg.cmd_fifo = IWL_MVM_CMD_FIFO;

        snprintf(mvm->hw->wiphy->fw_version,
                 sizeof(mvm->hw->wiphy->fw_version),
                 "%s", fw->fw_version);

        /* Configure transport layer */
        iwl_trans_configure(mvm->trans, &trans_cfg);

        trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
        trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);

        /* set up notification wait support */
        iwl_notification_wait_init(&mvm->notif_wait);

        /* Init phy db */
        mvm->phy_db = iwl_phy_db_init(trans);
        if (!mvm->phy_db) {
                IWL_ERR(mvm, "Cannot init phy_db\n");
                goto out_free;
        }

        IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
                 mvm->cfg->name, mvm->trans->hw_rev);

        err = iwl_trans_start_hw(mvm->trans);
        if (err)
                goto out_free;

        mutex_lock(&mvm->mutex);
        err = iwl_run_init_mvm_ucode(mvm, true);
        mutex_unlock(&mvm->mutex);
        if (err && !iwlmvm_mod_params.init_dbg) {
                IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
                goto out_free;
        }

        /* Stop the hw after the ALIVE and NVM has been read */
        if (!iwlmvm_mod_params.init_dbg)
                iwl_trans_stop_hw(mvm->trans, false);

        scan_size = sizeof(struct iwl_scan_cmd) +
                    mvm->fw->ucode_capa.max_probe_length +
                    (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel));
        mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
        if (!mvm->scan_cmd)
                goto out_free;

        err = iwl_mvm_mac_setup_register(mvm);
        if (err)
                goto out_free;

        err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
        if (err)
                goto out_unregister;

        return op_mode;

 out_unregister:
        ieee80211_unregister_hw(mvm->hw);
 out_free:
        iwl_phy_db_free(mvm->phy_db);
        kfree(mvm->scan_cmd);
        kfree(mvm->eeprom_blob);
        iwl_trans_stop_hw(trans, true);
        ieee80211_free_hw(mvm->hw);
        return NULL;
}
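
/*
 * Tear down everything set up in iwl_op_mode_mvm_start(): unregister from
 * mac80211, stop the transport, and free the phy_db, NVM and scan buffers.
 */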
static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
{
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        int i;

        iwl_mvm_leds_exit(mvm);

        ieee80211_unregister_hw(mvm->hw);

        kfree(mvm->scan_cmd);

        iwl_trans_stop_hw(mvm->trans, true);

        iwl_phy_db_free(mvm->phy_db);
        mvm->phy_db = NULL;

        kfree(mvm->eeprom_blob);
        iwl_free_nvm_data(mvm->nvm_data);
        for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
                kfree(mvm->nvm_sections[i].data);

        ieee80211_free_hw(mvm->hw);
}

struct iwl_async_handler_entry {
        struct list_head list;
        struct iwl_rx_cmd_buffer rxb;
        int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                  struct iwl_device_cmd *cmd);
};
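
/*
 * Drop all queued ASYNC notifications without running their handlers,
 * used in the stop flow (see iwl_mvm_mac_stop).
 */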
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
{
        struct iwl_async_handler_entry *entry, *tmp;

        spin_lock_bh(&mvm->async_handlers_lock);
        list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
                iwl_free_rxb(&entry->rxb);
                list_del(&entry->list);
                kfree(entry);
        }
        spin_unlock_bh(&mvm->async_handlers_lock);
}
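
/*
 * Worker that runs the queued ASYNC handlers with mvm->mutex held. Entries
 * are spliced to a local list under the spinlock so the Rx path can keep
 * queueing new entries while the handlers run.
 */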
static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
{
        struct iwl_mvm *mvm =
                container_of(wk, struct iwl_mvm, async_handlers_wk);
        struct iwl_async_handler_entry *entry, *tmp;
        struct list_head local_list;

        INIT_LIST_HEAD(&local_list);

        /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
        mutex_lock(&mvm->mutex);

        /*
         * Sync with Rx path with a lock. Remove all the entries from this
         * list, add them to a local one (lock free), and then handle them.
         */
        spin_lock_bh(&mvm->async_handlers_lock);
        list_splice_init(&mvm->async_handlers_list, &local_list);
        spin_unlock_bh(&mvm->async_handlers_lock);

        list_for_each_entry_safe(entry, tmp, &local_list, list) {
                if (entry->fn(mvm, &entry->rxb, NULL))
                        IWL_WARN(mvm,
                                 "return values from ASYNC handlers are ignored\n");
                iwl_free_rxb(&entry->rxb);
                list_del(&entry->list);
                kfree(entry);
        }
        mutex_unlock(&mvm->mutex);
}
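
/*
 * Dispatch an Rx notification: SYNC handlers are called directly from the
 * Rx path, while ASYNC handlers are queued (stealing the Rx page) and run
 * later from iwl_mvm_async_handlers_wk().
 */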
static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
                               struct iwl_rx_cmd_buffer *rxb,
                               struct iwl_device_cmd *cmd)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        u8 i;

        /*
         * Do the notification wait before RX handlers so
         * even if the RX handler consumes the RXB we have
         * access to it in the notification wait entry.
         */
        iwl_notification_wait_notify(&mvm->notif_wait, pkt);

        for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
                const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
                struct iwl_async_handler_entry *entry;

                if (rx_h->cmd_id != pkt->hdr.cmd)
                        continue;

                if (!rx_h->async)
                        return rx_h->fn(mvm, rxb, cmd);

                entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
                /* we can't do much... */
                if (!entry)
                        return 0;

                entry->rxb._page = rxb_steal_page(rxb);
                entry->rxb._offset = rxb->_offset;
                entry->rxb._rx_page_order = rxb->_rx_page_order;
                entry->fn = rx_h->fn;
                spin_lock(&mvm->async_handlers_lock);
                list_add_tail(&entry->list, &mvm->async_handlers_list);
                spin_unlock(&mvm->async_handlers_lock);
                schedule_work(&mvm->async_handlers_wk);
                break;
        }

        return 0;
}
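
/*
 * The transport queue stop/wake callbacks are refcounted per mac80211 queue:
 * the mac80211 queue is stopped only on the first stop request and woken
 * again only when the stop count drops back to zero.
 */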
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        int mq = mvm->queue_to_mac80211[queue];

        if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
                return;

        if (atomic_inc_return(&mvm->queue_stop_count[mq]) > 1) {
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "queue %d (mac80211 %d) already stopped\n",
                                    queue, mq);
                return;
        }

        set_bit(mq, &mvm->transport_queue_stop);
        ieee80211_stop_queue(mvm->hw, mq);
}

static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        int mq = mvm->queue_to_mac80211[queue];

        if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
                return;

        if (atomic_dec_return(&mvm->queue_stop_count[mq]) > 0) {
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "queue %d (mac80211 %d) already awake\n",
                                    queue, mq);
                return;
        }

        clear_bit(mq, &mvm->transport_queue_stop);
        ieee80211_wake_queue(mvm->hw, mq);
}

static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

        if (state)
                set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
        else
                clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);

        wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
}

static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        struct ieee80211_tx_info *info;

        info = IEEE80211_SKB_CB(skb);
        iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
        ieee80211_free_txskb(mvm->hw, skb);
}
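
/*
 * Common error recovery path: abort any pending notification waits and,
 * when permitted, ask mac80211 to restart the hardware, clearing any scan
 * that is still marked as running first.
 */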
static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
{
        iwl_abort_notification_waits(&mvm->notif_wait);

        /*
         * If we're restarting already, don't cycle restarts.
         * If INIT fw asserted, it will likely fail again.
         * If WoWLAN fw asserted, don't restart either, mac80211
         * can't recover this since we're already half suspended.
         */
        if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
                IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n");
        } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
                   iwlwifi_mod_params.restart_fw) {
                /*
                 * This is a bit racy, but worst case we tell mac80211 about
                 * a stopped/aborted (sched) scan when that was already done
                 * which is not a problem. It is necessary to abort any scan
                 * here because mac80211 requires having the scan cleared
                 * before restarting.
                 * We'll reset the scan_status to NONE in restart cleanup in
                 * the next start() call from mac80211.
                 */
                switch (mvm->scan_status) {
                case IWL_MVM_SCAN_NONE:
                        break;
                case IWL_MVM_SCAN_OS:
                        ieee80211_scan_completed(mvm->hw, true);
                        break;
                }

                ieee80211_restart_hw(mvm->hw);
        }
}

static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

        iwl_mvm_dump_nic_error_log(mvm);

        iwl_mvm_nic_restart(mvm);
}

static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
{
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

        WARN_ON(1);
        iwl_mvm_nic_restart(mvm);
}

static const struct iwl_op_mode_ops iwl_mvm_ops = {
        .start = iwl_op_mode_mvm_start,
        .stop = iwl_op_mode_mvm_stop,
        .rx = iwl_mvm_rx_dispatch,
        .queue_full = iwl_mvm_stop_sw_queue,
        .queue_not_full = iwl_mvm_wake_sw_queue,
        .hw_rf_kill = iwl_mvm_set_hw_rfkill_state,
        .free_skb = iwl_mvm_free_skb,
        .nic_error = iwl_mvm_nic_error,
        .cmd_queue_full = iwl_mvm_cmd_queue_full,
        .nic_config = iwl_mvm_nic_config,
};