
/******************************************************************************
 *
 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-commands.h"
#include "iwl-debug.h"
#include "iwl-power.h"

/*
 * Setting power level allows the card to go to sleep when not busy.
 *
 * We calculate a sleep command based on the required latency, which
 * we get from mac80211. In order to handle thermal throttling, we can
 * also use pre-defined power levels.
 */

/*
 * For now, keep using power level 1 instead of automatically
 * adjusting ...
 */
bool no_sleep_autoadjust = true;
module_param(no_sleep_autoadjust, bool, S_IRUGO);
MODULE_PARM_DESC(no_sleep_autoadjust,
                 "don't automatically adjust sleep level "
                 "according to maximum network latency");

/*
 * This defines the old power levels. They are still used by default
 * (level 1) and for thermal throttle (levels 3 through 5)
 */

struct iwl_power_vec_entry {
        struct iwl_powertable_cmd cmd;
        u8 no_dtim;     /* number of skip dtim */
};

#define IWL_DTIM_RANGE_0_MAX    2
#define IWL_DTIM_RANGE_1_MAX    10

#define NOSLP cpu_to_le16(0), 0, 0
#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
#define TU_TO_USEC 1024
#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
                                     cpu_to_le32(X1), \
                                     cpu_to_le32(X2), \
                                     cpu_to_le32(X3), \
                                     cpu_to_le32(X4)}

/* default power management (not Tx power) table values */
/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
/* DTIM 0 - 2 */
static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
        {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
        {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
};

/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
/* DTIM 3 - 10 */
static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
        {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
        {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
};

/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
/* DTIM 11 - */
static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
        {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
        {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
        {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
        {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
};
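
/*
 * Build a static sleep command for the given power level: pick the
 * range_0/1/2 table matching the DTIM period, then clamp the listen
 * interval against the per-entry maximum, the DTIM period and
 * IWL_CONN_MAX_LISTEN_INTERVAL, and set the sleep-over-DTIM and PCI PM
 * flags accordingly.
 */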
static void iwl_static_sleep_cmd(struct iwl_priv *priv,
                                 struct iwl_powertable_cmd *cmd,
                                 enum iwl_power_level lvl, int period)
{
        const struct iwl_power_vec_entry *table;
        int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
        int i;
        u8 skip;
        u32 slp_itrvl;

        table = range_2;
        if (period <= IWL_DTIM_RANGE_1_MAX)
                table = range_1;
        if (period <= IWL_DTIM_RANGE_0_MAX)
                table = range_0;

        BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);

        *cmd = table[lvl].cmd;

        if (period == 0) {
                skip = 0;
                period = 1;

                for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
                        max_sleep[i] = 1;

        } else {
                skip = table[lvl].no_dtim;

                for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
                        max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);

                max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
        }

        slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
        /* figure out the listen interval based on dtim period and skip */
        if (slp_itrvl == 0xFF)
                cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
                        cpu_to_le32(period * (skip + 1));

        slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
        if (slp_itrvl > period)
                cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
                        cpu_to_le32((slp_itrvl / period) * period);

        if (skip)
                cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
        else
                cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;

        slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
        if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
                cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
                        cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);

        /* enforce max sleep interval */
        for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
                if (le32_to_cpu(cmd->sleep_interval[i]) >
                    (max_sleep[i] * period))
                        cmd->sleep_interval[i] =
                                cpu_to_le32(max_sleep[i] * period);
                if (i != (IWL_POWER_VEC_SIZE - 1)) {
                        if (le32_to_cpu(cmd->sleep_interval[i]) >
                            le32_to_cpu(cmd->sleep_interval[i+1]))
                                cmd->sleep_interval[i] =
                                        cmd->sleep_interval[i+1];
                }
        }

        if (priv->power_data.pci_pm)
                cmd->flags |= IWL_POWER_PCI_PM_MSK;
        else
                cmd->flags &= ~IWL_POWER_PCI_PM_MSK;

        IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
                        skip, period);
        IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
}

/* default Thermal Throttling transaction table
 * Current state |         Throttling Down               |  Throttling Up
 *=============================================================================
 *  Condition Nxt State  Condition Nxt State Condition Nxt State
 *-----------------------------------------------------------------------------
 *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
 *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
 *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
 *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
 *=============================================================================
 */
static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
        {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};
static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
        {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};
static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
        {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
};
static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
        {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
        {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
        {IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
};

/* Advance Thermal Throttling default restriction table */
static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = {
        {IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true },
        {IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true },
        {IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false },
        {IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false }
};
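
/*
 * Build a CAM (continuously aware mode) command: no sleep at all, only
 * the PCI power management flag is carried over.
 */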
static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
                                    struct iwl_powertable_cmd *cmd)
{
        memset(cmd, 0, sizeof(*cmd));

        if (priv->power_data.pci_pm)
                cmd->flags |= IWL_POWER_PCI_PM_MSK;

        IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
}

static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
                                     struct iwl_powertable_cmd *cmd,
                                     int dynps_ms, int wakeup_period)
{
        /*
         * These are the original power level 3 sleep successions. The
         * device may behave better with such succession and was also
         * only tested with that. Just like the original sleep commands,
         * also adjust the succession here to the wakeup_period below.
         * The ranges are the same as for the sleep commands, 0-2, 3-9
         * and >10, which is selected based on the DTIM interval for
         * the sleep index but here we use the wakeup period since that
         * is what we need to do for the latency requirements.
         */
        static const u8 slp_succ_r0[IWL_POWER_VEC_SIZE] = { 2, 2, 2, 2, 2 };
        static const u8 slp_succ_r1[IWL_POWER_VEC_SIZE] = { 2, 4, 6, 7, 9 };
        static const u8 slp_succ_r2[IWL_POWER_VEC_SIZE] = { 2, 7, 9, 9, 0xFF };
        const u8 *slp_succ = slp_succ_r0;
        int i;

        if (wakeup_period > IWL_DTIM_RANGE_0_MAX)
                slp_succ = slp_succ_r1;
        if (wakeup_period > IWL_DTIM_RANGE_1_MAX)
                slp_succ = slp_succ_r2;

        memset(cmd, 0, sizeof(*cmd));

        cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
                     IWL_POWER_FAST_PD; /* no use seeing frames for others */

        if (priv->power_data.pci_pm)
                cmd->flags |= IWL_POWER_PCI_PM_MSK;

        cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
        cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);

        for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
                cmd->sleep_interval[i] =
                        cpu_to_le32(min_t(int, slp_succ[i], wakeup_period));

        IWL_DEBUG_POWER(priv, "Automatic sleep command\n");
}
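
/*
 * Send the sleep/powertable command (POWER_TABLE_CMD) to the uCode,
 * after logging its flags, timeouts and sleep interval vector.
 */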
static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
{
        IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
        IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
        IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
        IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
        IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
                        le32_to_cpu(cmd->sleep_interval[0]),
                        le32_to_cpu(cmd->sleep_interval[1]),
                        le32_to_cpu(cmd->sleep_interval[2]),
                        le32_to_cpu(cmd->sleep_interval[3]),
                        le32_to_cpu(cmd->sleep_interval[4]));

        return iwl_send_cmd_pdu(priv, POWER_TABLE_CMD,
                                sizeof(struct iwl_powertable_cmd), cmd);
}

/* priv->mutex must be held */
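/*
 * The sleep command is chosen in priority order: CAM for hardware with
 * broken powersave, the deepest static level when the device is idle
 * (if supported), the thermal throttling power index when throttling is
 * active, CAM when PS is disabled, the debugfs override level if set,
 * static power level 1 when no_sleep_autoadjust is set, and otherwise a
 * command calculated from the mac80211 latency requirements.
 */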
int iwl_power_update_mode(struct iwl_priv *priv, bool force)
{
        int ret = 0;
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
        bool update_chains;
        struct iwl_powertable_cmd cmd;
        int dtimper;

        /* Don't update the RX chain when chain noise calibration is running */
        update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
                        priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;

        dtimper = priv->hw->conf.ps_dtim_period ?: 1;

        if (priv->cfg->broken_powersave)
                iwl_power_sleep_cam_cmd(priv, &cmd);
        else if (priv->cfg->supports_idle &&
                 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
                iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20);
        else if (tt->state >= IWL_TI_1)
                iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper);
        else if (!enabled)
                iwl_power_sleep_cam_cmd(priv, &cmd);
        else if (priv->power_data.debug_sleep_level_override >= 0)
                iwl_static_sleep_cmd(priv, &cmd,
                                     priv->power_data.debug_sleep_level_override,
                                     dtimper);
        else if (no_sleep_autoadjust)
                iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_1, dtimper);
        else
                iwl_power_fill_sleep_cmd(priv, &cmd,
                                         priv->hw->conf.dynamic_ps_timeout,
                                         priv->hw->conf.max_sleep_period);

        if (iwl_is_ready_rf(priv) &&
            (memcmp(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd)) || force)) {
                if (cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
                        set_bit(STATUS_POWER_PMI, &priv->status);

                ret = iwl_set_power(priv, &cmd);
                if (!ret) {
                        if (!(cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
                                clear_bit(STATUS_POWER_PMI, &priv->status);

                        if (priv->cfg->ops->lib->update_chain_flags &&
                            update_chains)
                                priv->cfg->ops->lib->update_chain_flags(priv);
                        else if (priv->cfg->ops->lib->update_chain_flags)
                                IWL_DEBUG_POWER(priv,
                                        "Cannot update the power, chain noise "
                                        "calibration running: %d\n",
                                        priv->chain_noise_data.state);
                        memcpy(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd));
                } else
                        IWL_ERR(priv, "set power fail, ret = %d", ret);
        }

        return ret;
}
EXPORT_SYMBOL(iwl_power_update_mode);
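
/* HT is allowed unless the advanced TT restriction table forbids it in
 * the current thermal throttling state. */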
bool iwl_ht_enabled(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        struct iwl_tt_restriction *restriction;

        if (!priv->thermal_throttle.advanced_tt)
                return true;
        restriction = tt->restriction + tt->state;
        return restriction->is_ht;
}
EXPORT_SYMBOL(iwl_ht_enabled);
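
/*
 * Return true when the current temperature is within IWL_TT_CT_KILL_MARGIN
 * degrees of the CT kill threshold (legacy or advanced, as appropriate).
 */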
bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
{
        s32 temp = priv->temperature; /* degrees Celsius unless specified otherwise */
        bool within_margin = false;

        if (priv->cfg->temperature_kelvin)
                temp = KELVIN_TO_CELSIUS(priv->temperature);

        if (!priv->thermal_throttle.advanced_tt)
                within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
                                CT_KILL_THRESHOLD_LEGACY) ? true : false;
        else
                within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
                                CT_KILL_THRESHOLD) ? true : false;

        return within_margin;
}

enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        struct iwl_tt_restriction *restriction;

        if (!priv->thermal_throttle.advanced_tt)
                return IWL_ANT_OK_MULTI;
        restriction = tt->restriction + tt->state;
        return restriction->tx_stream;
}
EXPORT_SYMBOL(iwl_tx_ant_restriction);

enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        struct iwl_tt_restriction *restriction;

        if (!priv->thermal_throttle.advanced_tt)
                return IWL_ANT_OK_MULTI;
        restriction = tt->restriction + tt->state;
        return restriction->rx_stream;
}

#define CT_KILL_EXIT_DURATION (5)       /* 5 seconds duration */
#define CT_KILL_WAITING_DURATION (300)  /* 300ms duration */

/*
 * Toggle the bit to wake up the uCode and check the temperature.
 * If the temperature is below CT, the uCode stays awake and sends a card
 * state notification with the CT_KILL bit cleared to tell Thermal
 * Throttling Management to change state. Otherwise, the uCode goes back
 * to sleep without doing anything, and the driver keeps rearming the
 * 5 second timer to wake the uCode for a temperature check until the
 * temperature drops below CT.
 */
static void iwl_tt_check_exit_ct_kill(unsigned long data)
{
        struct iwl_priv *priv = (struct iwl_priv *)data;
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        unsigned long flags;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (tt->state == IWL_TI_CT_KILL) {
                if (priv->thermal_throttle.ct_kill_toggle) {
                        iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
                                    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
                        priv->thermal_throttle.ct_kill_toggle = false;
                } else {
                        iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
                                    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
                        priv->thermal_throttle.ct_kill_toggle = true;
                }
                iwl_read32(priv, CSR_UCODE_DRV_GP1);
                spin_lock_irqsave(&priv->reg_lock, flags);
                if (!iwl_grab_nic_access(priv))
                        iwl_release_nic_access(priv);
                spin_unlock_irqrestore(&priv->reg_lock, flags);

                /* Reschedule the ct_kill timer to occur in
                 * CT_KILL_EXIT_DURATION seconds to ensure we get a
                 * thermal update */
                IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n");
                mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
                          CT_KILL_EXIT_DURATION * HZ);
        }
}
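
/*
 * Stop or wake the mac80211 queues when entering/leaving CT_KILL; when
 * stopping, also arm the CT_KILL exit timer so the temperature keeps
 * being rechecked.
 */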
static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
                                     bool stop)
{
        if (stop) {
                IWL_DEBUG_POWER(priv, "Stop all queues\n");
                if (priv->mac80211_registered)
                        ieee80211_stop_queues(priv->hw);
                IWL_DEBUG_POWER(priv,
                                "Schedule 5 seconds CT_KILL Timer\n");
                mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
                          CT_KILL_EXIT_DURATION * HZ);
        } else {
                IWL_DEBUG_POWER(priv, "Wake all queues\n");
                if (priv->mac80211_registered)
                        ieee80211_wake_queues(priv->hw);
        }
}

static void iwl_tt_ready_for_ct_kill(unsigned long data)
{
        struct iwl_priv *priv = (struct iwl_priv *)data;
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        /* temperature timer expired, ready to go into CT_KILL state */
        if (tt->state != IWL_TI_CT_KILL) {
                IWL_DEBUG_POWER(priv, "entering CT_KILL state when temperature timer expired\n");
                tt->state = IWL_TI_CT_KILL;
                set_bit(STATUS_CT_KILL, &priv->status);
                iwl_perform_ct_kill_task(priv, true);
        }
}
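
/*
 * Request a fresh statistics (temperature) report from the uCode and arm
 * the short "waiting" timer; CT_KILL is only entered if that timer fires
 * before a new temperature reading cancels it.
 */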
static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
{
        IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n");
        /* make request to retrieve statistics information */
        iwl_send_statistics_request(priv, CMD_SYNC, false);
        /* Reschedule the ct_kill wait timer */
        mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
                  jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
}

#define IWL_MINIMAL_POWER_THRESHOLD             (CT_KILL_THRESHOLD_LEGACY)
#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2     (100)
#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1     (90)

/*
 * Legacy thermal throttling
 * 1) Avoid NIC destruction due to high temperatures
 *      Chip will identify dangerously high temperatures that can
 *      harm the device and will power down
 * 2) Avoid the NIC power down due to high temperature
 *      Throttle early enough to lower the power consumption before
 *      drastic steps are needed
 */
static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        enum iwl_tt_state old_state;

#ifdef CONFIG_IWLWIFI_DEBUG
        if ((tt->tt_previous_temp) &&
            (temp > tt->tt_previous_temp) &&
            ((temp - tt->tt_previous_temp) >
            IWL_TT_INCREASE_MARGIN)) {
                IWL_DEBUG_POWER(priv,
                        "Temperature increase %d degree Celsius\n",
                        (temp - tt->tt_previous_temp));
        }
#endif
        old_state = tt->state;
        /* in Celsius */
        if (temp >= IWL_MINIMAL_POWER_THRESHOLD)
                tt->state = IWL_TI_CT_KILL;
        else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2)
                tt->state = IWL_TI_2;
        else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1)
                tt->state = IWL_TI_1;
        else
                tt->state = IWL_TI_0;

#ifdef CONFIG_IWLWIFI_DEBUG
        tt->tt_previous_temp = temp;
#endif
        /* stop ct_kill_waiting_tm timer */
        del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
        if (tt->state != old_state) {
                switch (tt->state) {
                case IWL_TI_0:
                        /*
                         * When the system is ready to go back to IWL_TI_0
                         * we only have to call iwl_power_update_mode() to
                         * do so.
                         */
                        break;
                case IWL_TI_1:
                        tt->tt_power_mode = IWL_POWER_INDEX_3;
                        break;
                case IWL_TI_2:
                        tt->tt_power_mode = IWL_POWER_INDEX_4;
                        break;
                default:
                        tt->tt_power_mode = IWL_POWER_INDEX_5;
                        break;
                }
                mutex_lock(&priv->mutex);
                if (old_state == IWL_TI_CT_KILL)
                        clear_bit(STATUS_CT_KILL, &priv->status);
                if (tt->state != IWL_TI_CT_KILL &&
                    iwl_power_update_mode(priv, true)) {
                        /* TT state not updated
                         * try again during next temperature read
                         */
                        if (old_state == IWL_TI_CT_KILL)
                                set_bit(STATUS_CT_KILL, &priv->status);
                        tt->state = old_state;
                        IWL_ERR(priv, "Cannot update power mode, "
                                        "TT state not updated\n");
                } else {
                        if (tt->state == IWL_TI_CT_KILL) {
                                if (force) {
                                        set_bit(STATUS_CT_KILL, &priv->status);
                                        iwl_perform_ct_kill_task(priv, true);
                                } else {
                                        iwl_prepare_ct_kill_task(priv);
                                        tt->state = old_state;
                                }
                        } else if (old_state == IWL_TI_CT_KILL &&
                                 tt->state != IWL_TI_CT_KILL)
                                iwl_perform_ct_kill_task(priv, false);
                        IWL_DEBUG_POWER(priv, "Temperature state changed %u\n",
                                        tt->state);
                        IWL_DEBUG_POWER(priv, "Power Index change to %u\n",
                                        tt->tt_power_mode);
                }
                mutex_unlock(&priv->mutex);
        }
}

/*
 * Advance thermal throttling
 * 1) Avoid NIC destruction due to high temperatures
 *      Chip will identify dangerously high temperatures that can
 *      harm the device and will power down
 * 2) Avoid the NIC power down due to high temperature
 *      Throttle early enough to lower the power consumption before
 *      drastic steps are needed
 *      Actions include relaxing the power down sleep thresholds and
 *      decreasing the number of TX streams
 * 3) Avoid throughput performance impact as much as possible
 *
 *=============================================================================
 *  Condition Nxt State  Condition Nxt State Condition Nxt State
 *-----------------------------------------------------------------------------
 *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
 *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
 *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
 *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
 *=============================================================================
 */
static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        int i;
        bool changed = false;
        enum iwl_tt_state old_state;
        struct iwl_tt_trans *transaction;

        old_state = tt->state;
        for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) {
                /* Based on the current TT state, find the corresponding
                 * transaction table; each table has (IWL_TI_STATE_MAX - 1)
                 * entries, so tt->transaction +
                 * (old_state * (IWL_TI_STATE_MAX - 1)) points to the
                 * correct table. Then, based on the current temperature,
                 * go through all the possible (IWL_TI_STATE_MAX - 1)
                 * entries in that table to see whether a transition is
                 * needed, and to which state.
                 */
                transaction = tt->transaction +
                        ((old_state * (IWL_TI_STATE_MAX - 1)) + i);
                if (temp >= transaction->tt_low &&
                    temp <= transaction->tt_high) {
#ifdef CONFIG_IWLWIFI_DEBUG
                        if ((tt->tt_previous_temp) &&
                            (temp > tt->tt_previous_temp) &&
                            ((temp - tt->tt_previous_temp) >
                            IWL_TT_INCREASE_MARGIN)) {
                                IWL_DEBUG_POWER(priv,
                                        "Temperature increase %d "
                                        "degree Celsius\n",
                                        (temp - tt->tt_previous_temp));
                        }
                        tt->tt_previous_temp = temp;
#endif
                        if (old_state !=
                            transaction->next_state) {
                                changed = true;
                                tt->state =
                                        transaction->next_state;
                        }
                        break;
                }
        }
        /* stop ct_kill_waiting_tm timer */
        del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
        if (changed) {
                struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

                if (tt->state >= IWL_TI_1) {
                        /* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
                        tt->tt_power_mode = IWL_POWER_INDEX_5;
                        if (!iwl_ht_enabled(priv))
                                /* disable HT */
                                rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
                                        RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
                                        RXON_FLG_HT40_PROT_MSK |
                                        RXON_FLG_HT_PROT_MSK);
                        else {
                                /* check HT capability and set
                                 * according to the system HT capability
                                 * in case it got disabled before */
                                iwl_set_rxon_ht(priv, &priv->current_ht_config);
                        }
                } else {
                        /*
                         * restore system power setting -- it will be
                         * recalculated automatically.
                         */

                        /* check HT capability and set
                         * according to the system HT capability
                         * in case it got disabled before */
                        iwl_set_rxon_ht(priv, &priv->current_ht_config);
                }
                mutex_lock(&priv->mutex);
                if (old_state == IWL_TI_CT_KILL)
                        clear_bit(STATUS_CT_KILL, &priv->status);
                if (tt->state != IWL_TI_CT_KILL &&
                    iwl_power_update_mode(priv, true)) {
                        /* TT state not updated
                         * try again during next temperature read
                         */
                        IWL_ERR(priv, "Cannot update power mode, "
                                        "TT state not updated\n");
                        if (old_state == IWL_TI_CT_KILL)
                                set_bit(STATUS_CT_KILL, &priv->status);
                        tt->state = old_state;
                } else {
                        IWL_DEBUG_POWER(priv,
                                        "Thermal Throttling to new state: %u\n",
                                        tt->state);
                        if (old_state != IWL_TI_CT_KILL &&
                            tt->state == IWL_TI_CT_KILL) {
                                if (force) {
                                        IWL_DEBUG_POWER(priv,
                                                "Enter IWL_TI_CT_KILL\n");
                                        set_bit(STATUS_CT_KILL, &priv->status);
                                        iwl_perform_ct_kill_task(priv, true);
                                } else {
                                        iwl_prepare_ct_kill_task(priv);
                                        tt->state = old_state;
                                }
                        } else if (old_state == IWL_TI_CT_KILL &&
                                 tt->state != IWL_TI_CT_KILL) {
                                IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n");
                                iwl_perform_ct_kill_task(priv, false);
                        }
                }
                mutex_unlock(&priv->mutex);
        }
}

/* Card State Notification indicated that the critical temperature was
 * reached. If PSP is not enabled, no Thermal Throttling is performed and we
 * just set the GP1 bit to acknowledge the event; otherwise, go into the
 * IWL_TI_CT_KILL state. Since the Card State Notification does not provide
 * any temperature reading, for legacy mode pass the CT_KILL temperature to
 * iwl_legacy_tt_handler(), and for advance mode pass CT_KILL_THRESHOLD + 1
 * to make sure we move into the IWL_TI_CT_KILL state.
 */
static void iwl_bg_ct_enter(struct work_struct *work)
{
        struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (!iwl_is_ready(priv))
                return;

        if (tt->state != IWL_TI_CT_KILL) {
                IWL_ERR(priv, "Device reached critical temperature "
                              "- ucode going to sleep!\n");
                if (!priv->thermal_throttle.advanced_tt)
                        iwl_legacy_tt_handler(priv,
                                              IWL_MINIMAL_POWER_THRESHOLD,
                                              true);
                else
                        iwl_advance_tt_handler(priv,
                                               CT_KILL_THRESHOLD + 1, true);
        }
}

/* Card State Notification indicated that the device is back below the
 * critical temperature. Since the Card State Notification does not provide
 * any temperature reading, pass IWL_REDUCED_PERFORMANCE_THRESHOLD_2 to
 * iwl_legacy_tt_handler() to get out of the IWL_TI_CT_KILL state.
 */
static void iwl_bg_ct_exit(struct work_struct *work)
{
        struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (!iwl_is_ready(priv))
                return;

        /* stop ct_kill_exit_tm timer */
        del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);

        if (tt->state == IWL_TI_CT_KILL) {
                IWL_ERR(priv,
                        "Device temperature below critical "
                        "- ucode awake!\n");
                /*
                 * exit from CT_KILL state
                 * reset the current temperature reading
                 */
                priv->temperature = 0;
                if (!priv->thermal_throttle.advanced_tt)
                        iwl_legacy_tt_handler(priv,
                                      IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
                                      true);
                else
                        iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
                                               true);
        }
}

void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
{
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n");
        queue_work(priv->workqueue, &priv->ct_enter);
}
EXPORT_SYMBOL(iwl_tt_enter_ct_kill);

void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
{
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n");
        queue_work(priv->workqueue, &priv->ct_exit);
}
EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
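
/*
 * Regular thermal throttling work: convert the last temperature reading
 * to Celsius if the device reports Kelvin, then hand it to the legacy or
 * advanced handler.
 */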
static void iwl_bg_tt_work(struct work_struct *work)
{
        struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
        s32 temp = priv->temperature; /* degrees Celsius unless specified otherwise */

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        if (priv->cfg->temperature_kelvin)
                temp = KELVIN_TO_CELSIUS(priv->temperature);

        if (!priv->thermal_throttle.advanced_tt)
                iwl_legacy_tt_handler(priv, temp, false);
        else
                iwl_advance_tt_handler(priv, temp, false);
}

void iwl_tt_handler(struct iwl_priv *priv)
{
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n");
        queue_work(priv->workqueue, &priv->tt_work);
}
EXPORT_SYMBOL(iwl_tt_handler);

/* Thermal throttling initialization
 * For advance thermal throttling:
 *      Initialize Thermal Index and temperature threshold table
 *      Initialize thermal throttling restriction table
 */
void iwl_tt_initialize(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
        int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
        struct iwl_tt_trans *transaction;

        IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");

        memset(tt, 0, sizeof(struct iwl_tt_mgmt));

        tt->state = IWL_TI_0;
        init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
        priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
        priv->thermal_throttle.ct_kill_exit_tm.function =
                iwl_tt_check_exit_ct_kill;
        init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
        priv->thermal_throttle.ct_kill_waiting_tm.data = (unsigned long)priv;
        priv->thermal_throttle.ct_kill_waiting_tm.function =
                iwl_tt_ready_for_ct_kill;

        /* setup deferred ct kill work */
        INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
        INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
        INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);

        if (priv->cfg->adv_thermal_throttle) {
                IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
                tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
                                         IWL_TI_STATE_MAX, GFP_KERNEL);
                tt->transaction = kzalloc(sizeof(struct iwl_tt_trans) *
                        IWL_TI_STATE_MAX * (IWL_TI_STATE_MAX - 1),
                        GFP_KERNEL);
                if (!tt->restriction || !tt->transaction) {
                        IWL_ERR(priv, "Fallback to Legacy Throttling\n");
                        priv->thermal_throttle.advanced_tt = false;
                        kfree(tt->restriction);
                        tt->restriction = NULL;
                        kfree(tt->transaction);
                        tt->transaction = NULL;
                } else {
                        transaction = tt->transaction +
                                (IWL_TI_0 * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_0[0], size);
                        transaction = tt->transaction +
                                (IWL_TI_1 * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_1[0], size);
                        transaction = tt->transaction +
                                (IWL_TI_2 * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_2[0], size);
                        transaction = tt->transaction +
                                (IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1));
                        memcpy(transaction, &tt_range_3[0], size);
                        size = sizeof(struct iwl_tt_restriction) *
                                IWL_TI_STATE_MAX;
                        memcpy(tt->restriction,
                                &restriction_range[0], size);
                        priv->thermal_throttle.advanced_tt = true;
                }
        } else {
                IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
                priv->thermal_throttle.advanced_tt = false;
        }
}
EXPORT_SYMBOL(iwl_tt_initialize);

/* cleanup thermal throttling management related memory and timer */
void iwl_tt_exit(struct iwl_priv *priv)
{
        struct iwl_tt_mgmt *tt = &priv->thermal_throttle;

        /* stop ct_kill_exit_tm timer if activated */
        del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
        /* stop ct_kill_waiting_tm timer if activated */
        del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
        cancel_work_sync(&priv->tt_work);
        cancel_work_sync(&priv->ct_enter);
        cancel_work_sync(&priv->ct_exit);

        if (priv->thermal_throttle.advanced_tt) {
                /* free advance thermal throttling memory */
                kfree(tt->restriction);
                tt->restriction = NULL;
                kfree(tt->transaction);
                tt->transaction = NULL;
        }
}
EXPORT_SYMBOL(iwl_tt_exit);

/* initialize to default */
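/*
 * pci_pm is set when ASPM L0s is not enabled in the PCIe link control
 * register; the debugfs sleep level override starts out disabled (-1).
 */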
void iwl_power_initialize(struct iwl_priv *priv)
{
        u16 lctl = iwl_pcie_link_ctl(priv);

        priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);

        priv->power_data.debug_sleep_level_override = -1;

        memset(&priv->power_data.sleep_cmd, 0,
                sizeof(priv->power_data.sleep_cmd));
}
EXPORT_SYMBOL(iwl_power_initialize);