processor_thermal.c
/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/sysdev.h>

#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <acpi/acpi_drivers.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS    "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");
/* --------------------------------------------------------------------------
                                Limit Interface
   -------------------------------------------------------------------------- */
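/*
 * Apply the currently active limit to the processor: the stricter (higher)
 * of the user-requested and thermally-requested T-states is programmed via
 * acpi_processor_set_throttling(), and the resulting limit is recorded in
 * pr->limit.state.
 */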
static int acpi_processor_apply_limit(struct acpi_processor *pr)
{
        int result = 0;
        u16 px = 0;
        u16 tx = 0;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.limit)
                return -ENODEV;

        if (pr->flags.throttling) {
                if (pr->limit.user.tx > tx)
                        tx = pr->limit.user.tx;
                if (pr->limit.thermal.tx > tx)
                        tx = pr->limit.thermal.tx;

                result = acpi_processor_set_throttling(pr, tx, false);
                if (result)
                        goto end;
        }

        pr->limit.state.px = px;
        pr->limit.state.tx = tx;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Processor [%d] limit set to (P%d:T%d)\n", pr->id,
                          pr->limit.state.px, pr->limit.state.tx));

      end:
        if (result)
                printk(KERN_ERR PREFIX "Unable to set limit\n");

        return result;
}
#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */
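/*
 * Each reduction step caps the allowed maximum frequency by a further 20% of
 * cpuinfo.max_freq, so steps 0..3 correspond to 100%, 80%, 60% and 40% of the
 * hardware maximum (enforced in acpi_thermal_cpufreq_notifier() below).
 */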
#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;
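/* A CPU can be passively cooled through cpufreq only if the policy notifier
 * was successfully registered and the CPU actually has a cpufreq policy. */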
static int cpu_has_cpufreq(unsigned int cpu)
{
        struct cpufreq_policy policy;

        if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
                return 0;
        return 1;
}
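/* Raise the per-CPU reduction step by one and re-evaluate the policy.
 * Returns 0 on success, -ERANGE once the step is already at its maximum,
 * and -ENODEV if cpufreq is not available for this CPU. */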
static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return -ENODEV;

        if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) <
            CPUFREQ_THERMAL_MAX_STEP) {
                per_cpu(cpufreq_thermal_reduction_pctg, cpu)++;
                cpufreq_update_policy(cpu);
                return 0;
        }

        return -ERANGE;
}
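/* Lower the per-CPU reduction step by one and re-evaluate the policy.
 * Returns 1 once the reduction is back to zero (the CPU may run at full
 * frequency again), 0 otherwise, and -ENODEV if cpufreq is unavailable. */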
static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return -ENODEV;

        if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) >
            (CPUFREQ_THERMAL_MIN_STEP + 1))
                per_cpu(cpufreq_thermal_reduction_pctg, cpu)--;
        else
                per_cpu(cpufreq_thermal_reduction_pctg, cpu) = 0;
        cpufreq_update_policy(cpu);

        /* We reached max freq again and can leave passive mode */
        return !per_cpu(cpufreq_thermal_reduction_pctg, cpu);
}
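/* Policy notifier: on every CPUFREQ_ADJUST event, clamp the policy's maximum
 * frequency to (100 - reduction_step * 20)% of cpuinfo.max_freq for the
 * affected CPU. */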
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
                                         unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        unsigned long max_freq = 0;

        if (event != CPUFREQ_ADJUST)
                goto out;

        max_freq = (
            policy->cpuinfo.max_freq *
            (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
        ) / 100;

        cpufreq_verify_within_limits(policy, 0, max_freq);

      out:
        return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
        .notifier_call = acpi_thermal_cpufreq_notifier,
};
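/* Expose the per-CPU reduction step through the generic cooling-state
 * interface: state 0 means no cpufreq-based reduction, and
 * CPUFREQ_THERMAL_MAX_STEP is the strongest reduction. */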
static int cpufreq_get_max_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
        cpufreq_update_policy(cpu);
        return 0;
}
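/* Reset the reduction step for all present CPUs and register (later
 * unregister) the policy notifier, so the thermal reduction takes effect
 * whenever a cpufreq policy is re-evaluated. */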
void acpi_thermal_cpufreq_init(void)
{
        int i;

        for (i = 0; i < nr_cpu_ids; i++)
                if (cpu_present(i))
                        per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;

        i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
                                      CPUFREQ_POLICY_NOTIFIER);
        if (!i)
                acpi_thermal_cpufreq_is_init = 1;
}

void acpi_thermal_cpufreq_exit(void)
{
        if (acpi_thermal_cpufreq_is_init)
                cpufreq_unregister_notifier
                    (&acpi_thermal_cpufreq_notifier_block,
                     CPUFREQ_POLICY_NOTIFIER);

        acpi_thermal_cpufreq_is_init = 0;
}
#else                           /* ! CONFIG_CPU_FREQ */

static int cpufreq_get_max_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        return 0;
}

static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
        return -ENODEV;
}

static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
        return -ENODEV;
}

#endif
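/*
 * Adjust the passive-cooling limit of the processor behind @handle. When the
 * limit is incremented, P-states (via cpufreq) are exhausted before T-states;
 * when it is decremented, T-states are released before P-states. Returns 1
 * once a decrement is no longer possible (the processor is back at full
 * performance), otherwise 0 or a negative error code.
 */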
int acpi_processor_set_thermal_limit(acpi_handle handle, int type)
{
        int result = 0;
        struct acpi_processor *pr = NULL;
        struct acpi_device *device = NULL;
        int tx = 0, max_tx_px = 0;

        if ((type < ACPI_PROCESSOR_LIMIT_NONE)
            || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
                return -EINVAL;

        result = acpi_bus_get_device(handle, &device);
        if (result)
                return result;

        pr = acpi_driver_data(device);
        if (!pr)
                return -ENODEV;

        /* Thermal limits are always relative to the current Px/Tx state. */
        if (pr->flags.throttling)
                pr->limit.thermal.tx = pr->throttling.state;

        /*
         * Our default policy is to only use throttling at the lowest
         * performance state.
         */

        tx = pr->limit.thermal.tx;

        switch (type) {

        case ACPI_PROCESSOR_LIMIT_NONE:
                do {
                        result = acpi_thermal_cpufreq_decrease(pr->id);
                } while (!result);
                tx = 0;
                break;

        case ACPI_PROCESSOR_LIMIT_INCREMENT:
                /* if going up: P-states first, T-states later */

                result = acpi_thermal_cpufreq_increase(pr->id);
                if (!result)
                        goto end;
                else if (result == -ERANGE)
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "At maximum performance state\n"));

                if (pr->flags.throttling) {
                        if (tx == (pr->throttling.state_count - 1))
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                                  "At maximum throttling state\n"));
                        else
                                tx++;
                }
                break;
        case ACPI_PROCESSOR_LIMIT_DECREMENT:
                /* if going down: T-states first, P-states later */

                if (pr->flags.throttling) {
                        if (tx == 0) {
                                max_tx_px = 1;
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                                  "At minimum throttling state\n"));
                        } else {
                                tx--;
                                goto end;
                        }
                }

                result = acpi_thermal_cpufreq_decrease(pr->id);
                if (result) {
                        /*
                         * The only non-zero returns here are -ENODEV and 1;
                         * in both cases no further cpufreq reduction applies,
                         * so we are back at the maximum frequency.
                         */
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "At minimum performance state\n"));
                        max_tx_px = 1;
                } else
                        max_tx_px = 0;

                break;
        }
      end:
        if (pr->flags.throttling) {
                pr->limit.thermal.px = 0;
                pr->limit.thermal.tx = tx;

                result = acpi_processor_apply_limit(pr);
                if (result)
                        printk(KERN_ERR PREFIX "Unable to set thermal limit\n");

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
                                  pr->limit.thermal.px, pr->limit.thermal.tx));
        } else
                result = 0;

        if (max_tx_px)
                return 1;
        else
                return result;
}
int acpi_processor_get_limit_info(struct acpi_processor *pr)
{
        if (!pr)
                return -EINVAL;

        if (pr->flags.throttling)
                pr->flags.limit = 1;

        return 0;
}
/* thermal cooling device callbacks */
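/*
 * The cooling device state space concatenates the cpufreq reduction steps and
 * the ACPI T-states: states 0..max_pstate select a cpufreq reduction step,
 * and anything above that selects a throttling state on top of the deepest
 * cpufreq reduction (see processor_set_cur_state() below).
 */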
static int acpi_processor_max_state(struct acpi_processor *pr)
{
        int max_state = 0;

        /*
         * There exist four cpufreq-based cooling states, corresponding to
         * cpufreq_thermal_reduction_pctg values 0, 1, 2 and 3.
         */
        max_state += cpufreq_get_max_state(pr->id);
        if (pr->flags.throttling)
                max_state += (pr->throttling.state_count - 1);

        return max_state;
}
static int
processor_get_max_state(struct thermal_cooling_device *cdev,
                        unsigned long *state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;

        /* check the device before dereferencing it for its driver data */
        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        *state = acpi_processor_max_state(pr);
        return 0;
}
static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
                        unsigned long *cur_state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;

        /* check the device before dereferencing it for its driver data */
        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        *cur_state = cpufreq_get_cur_state(pr->id);
        if (pr->flags.throttling)
                *cur_state += pr->throttling.state;
        return 0;
}
static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
                        unsigned long state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;
        int result = 0;
        int max_pstate;

        /* check the device before dereferencing it for its driver data */
        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        max_pstate = cpufreq_get_max_state(pr->id);

        if (state > acpi_processor_max_state(pr))
                return -EINVAL;

        if (state <= max_pstate) {
                if (pr->flags.throttling && pr->throttling.state)
                        result = acpi_processor_set_throttling(pr, 0, false);
                cpufreq_set_cur_state(pr->id, state);
        } else {
                cpufreq_set_cur_state(pr->id, max_pstate);
                result = acpi_processor_set_throttling(pr,
                                                       state - max_pstate,
                                                       false);
        }
        return result;
}
struct thermal_cooling_device_ops processor_cooling_ops = {
        .get_max_state = processor_get_max_state,
        .get_cur_state = processor_get_cur_state,
        .set_cur_state = processor_set_cur_state,
};