processor_thermal.c
/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                    - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <acpi/acpi_drivers.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS    "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;
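
/*
 * Return 1 when the cpufreq notifier has been set up and a cpufreq policy
 * exists for @cpu, 0 otherwise.
 */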
static int cpu_has_cpufreq(unsigned int cpu)
{
        struct cpufreq_policy policy;
        if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
                return 0;
        return 1;
}
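
/*
 * CPUFREQ_ADJUST callback: clamp the policy's maximum frequency according to
 * the per-CPU thermal reduction step.  Each step removes another 20% of
 * cpuinfo.max_freq (step 0 = 100%, step 3 = 40%).
 */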
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
                                         unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        unsigned long max_freq = 0;

        if (event != CPUFREQ_ADJUST)
                goto out;

        max_freq = (
            policy->cpuinfo.max_freq *
            (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
        ) / 100;

        cpufreq_verify_within_limits(policy, 0, max_freq);

      out:
        return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
        .notifier_call = acpi_thermal_cpufreq_notifier,
};
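
/*
 * Cooling-state helpers used by the thermal callbacks further down: the
 * current state is simply the per-CPU reduction step, and setting a new step
 * triggers a policy update so the notifier above can re-clamp max_freq.
 */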
static int cpufreq_get_max_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
        cpufreq_update_policy(cpu);
        return 0;
}
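
/*
 * Reset every present CPU's reduction step and register the policy notifier;
 * acpi_thermal_cpufreq_is_init is only set when registration succeeds, so
 * the helpers above stay inert otherwise.
 */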
void acpi_thermal_cpufreq_init(void)
{
        int i;

        for (i = 0; i < nr_cpu_ids; i++)
                if (cpu_present(i))
                        per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;

        i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
                                      CPUFREQ_POLICY_NOTIFIER);
        if (!i)
                acpi_thermal_cpufreq_is_init = 1;
}

void acpi_thermal_cpufreq_exit(void)
{
        if (acpi_thermal_cpufreq_is_init)
                cpufreq_unregister_notifier
                    (&acpi_thermal_cpufreq_notifier_block,
                     CPUFREQ_POLICY_NOTIFIER);

        acpi_thermal_cpufreq_is_init = 0;
}

#else                           /* ! CONFIG_CPU_FREQ */
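
/*
 * Without CONFIG_CPU_FREQ the cpufreq helpers degrade to no-ops and
 * contribute zero cooling states; only throttling is left.
 */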
static int cpufreq_get_max_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        return 0;
}

#endif
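
/*
 * A processor exposes a limit interface only when T-state throttling is
 * supported, so mirror flags.throttling into flags.limit.
 */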
int acpi_processor_get_limit_info(struct acpi_processor *pr)
{
        if (!pr)
                return -EINVAL;

        if (pr->flags.throttling)
                pr->flags.limit = 1;

        return 0;
}

/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
        int max_state = 0;

        /*
         * There exist four cpufreq states according to
         * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3
         */
        max_state += cpufreq_get_max_state(pr->id);
        if (pr->flags.throttling)
                max_state += (pr->throttling.state_count - 1);

        return max_state;
}
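
/*
 * thermal_cooling_device_ops.get_max_state: the deepest cooling state is the
 * number of cpufreq reduction steps plus the number of additional throttling
 * states.
 */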
static int
processor_get_max_state(struct thermal_cooling_device *cdev,
                        unsigned long *state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        *state = acpi_processor_max_state(pr);
        return 0;
}
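
/*
 * get_cur_state: the current cooling state is the cpufreq reduction step
 * plus the current throttling state, matching the layout used by
 * set_cur_state below.
 */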
static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
                        unsigned long *cur_state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        *cur_state = cpufreq_get_cur_state(pr->id);
        if (pr->flags.throttling)
                *cur_state += pr->throttling.state;
        return 0;
}
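
/*
 * set_cur_state: states up to the cpufreq maximum are handled purely by
 * frequency reduction (clearing any leftover throttling); deeper states pin
 * cpufreq at its deepest step and throttle by the remainder.
 */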
static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
                        unsigned long state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;
        int result = 0;
        int max_pstate;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        max_pstate = cpufreq_get_max_state(pr->id);

        if (state > acpi_processor_max_state(pr))
                return -EINVAL;

        if (state <= max_pstate) {
                if (pr->flags.throttling && pr->throttling.state)
                        result = acpi_processor_set_throttling(pr, 0, false);
                cpufreq_set_cur_state(pr->id, state);
        } else {
                cpufreq_set_cur_state(pr->id, max_pstate);
                result = acpi_processor_set_throttling(pr,
                                                       state - max_pstate,
                                                       false);
        }

        return result;
}

const struct thermal_cooling_device_ops processor_cooling_ops = {
        .get_max_state = processor_get_max_state,
        .get_cur_state = processor_get_cur_state,
        .set_cur_state = processor_set_cur_state,
};
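
/*
 * Usage note (sketch, not part of this file): processor_cooling_ops is
 * consumed by the rest of the ACPI processor driver, which typically hands
 * it to the generic thermal framework, e.g.
 *
 *     pr->cdev = thermal_cooling_device_register("Processor", device,
 *                                                &processor_cooling_ops);
 *
 * so that the thermal core can drive the cpufreq/throttling states above.
 */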