therm_throt.c
/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 * Inspired by Ross Biro's and Al Borchers' counter code.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL          (300 * HZ)
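
/*
 * HZ jiffies equal one second, so 300 * HZ is a five-minute window: at
 * most one syslog/mcelog report per state (core or package) every five
 * minutes, while throttle_count keeps counting every event in between.
 */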

/*
 * Current thermal throttling state:
 */
struct _thermal_state {
        bool                    is_throttled;
        u64                     next_check;
        unsigned long           throttle_count;
        unsigned long           last_throttle_count;
};

struct thermal_state {
        struct _thermal_state   core;
        struct _thermal_state   package;
};
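
/*
 * Throttling is tracked at two granularities: per core (TM1/TM2) and, on
 * CPUs with package thermal status (X86_FEATURE_PTS, checked below), per
 * physical package as well.
 */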

static DEFINE_PER_CPU(struct thermal_state, thermal_state);

static atomic_t therm_throt_en = ATOMIC_INIT(0);

static u32 lvtthmr_init __read_mostly;

#ifdef CONFIG_SYSFS
#define define_therm_throt_sysdev_one_ro(_name)                         \
        static SYSDEV_ATTR(_name, 0444,                                 \
                           therm_throt_sysdev_show_##_name,             \
                           NULL)

#define define_therm_throt_sysdev_show_func(level, name)                \
                                                                        \
static ssize_t therm_throt_sysdev_show_##level##_##name(                \
                        struct sys_device *dev,                         \
                        struct sysdev_attribute *attr,                   \
                        char *buf)                                      \
{                                                                       \
        unsigned int cpu = dev->id;                                     \
        ssize_t ret;                                                    \
                                                                        \
        preempt_disable();      /* CPU hotplug */                       \
        if (cpu_online(cpu)) {                                          \
                ret = sprintf(buf, "%lu\n",                             \
                              per_cpu(thermal_state, cpu).level.name);  \
        } else                                                          \
                ret = 0;                                                \
        preempt_enable();                                               \
                                                                        \
        return ret;                                                     \
}

define_therm_throt_sysdev_show_func(core, throttle_count);
define_therm_throt_sysdev_one_ro(core_throttle_count);

define_therm_throt_sysdev_show_func(package, throttle_count);
define_therm_throt_sysdev_one_ro(package_throttle_count);
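
/*
 * For reference, a rough sketch of what the macro pair above expands to
 * for (core, throttle_count): a show function named
 * therm_throt_sysdev_show_core_throttle_count() plus, via SYSDEV_ATTR's
 * attr_##_name convention, a read-only attribute named
 * attr_core_throttle_count, which is hooked up just below.
 */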

static struct attribute *thermal_throttle_attrs[] = {
        &attr_core_throttle_count.attr,
        NULL
};

static struct attribute_group thermal_throttle_attr_group = {
        .attrs  = thermal_throttle_attrs,
        .name   = "thermal_throttle"
};
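
/*
 * With the group name "thermal_throttle" and the per-CPU sysdev this is
 * attached to below, the counters should surface as roughly
 * /sys/devices/system/cpu/cpuN/thermal_throttle/core_throttle_count
 * (plus package_throttle_count on PTS-capable CPUs).
 */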

#endif /* CONFIG_SYSFS */

/**
 * therm_throt_process - Process thermal throttling event from interrupt
 * @is_throttled: Whether the condition is currently asserted (boolean),
 *                since the thermal interrupt normally gets called both
 *                when the thermal event begins and once it has ended.
 * @level: CORE_LEVEL or PACKAGE_LEVEL, selecting which per-CPU state
 *         to update.
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be further logged, i.e. still in
 *              "timeout" from previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
#define CORE_LEVEL      0
#define PACKAGE_LEVEL   1
static int therm_throt_process(bool is_throttled, int level)
{
        struct _thermal_state *state;
        unsigned int this_cpu;
        bool was_throttled;
        u64 now;

        this_cpu = smp_processor_id();
        now = get_jiffies_64();
        if (level == CORE_LEVEL)
                state = &per_cpu(thermal_state, this_cpu).core;
        else
                state = &per_cpu(thermal_state, this_cpu).package;

        was_throttled = state->is_throttled;
        state->is_throttled = is_throttled;

        if (is_throttled)
                state->throttle_count++;

        /*
         * Rate limit: while still inside the check interval and events are
         * merely accumulating, do not log anything.
         */
        if (time_before64(now, state->next_check) &&
                        state->throttle_count != state->last_throttle_count)
                return 0;

        state->next_check = now + CHECK_INTERVAL;
        state->last_throttle_count = state->throttle_count;

        /* if we just entered the thermal event */
        if (is_throttled) {
                printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
                                this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package",
                                state->throttle_count);

                add_taint(TAINT_MACHINE_CHECK);
                return 1;
        }
        if (was_throttled) {
                printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
                                this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package");
                return 1;
        }

        return 0;
}
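
/*
 * Net effect, as a sketch: the first throttle event on a core logs at
 * KERN_CRIT and taints the kernel; repeated events within the next 300
 * seconds are only counted (visible via the sysfs counters above); once
 * the interval has passed, the next throttle event is logged again, and
 * a return to normal is reported at KERN_INFO.
 */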

#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
{
        int err;
        struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

        err = sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
        if (err)
                return err;

        if (cpu_has(c, X86_FEATURE_PTS))
                err = sysfs_add_file_to_group(&sys_dev->kobj,
                                              &attr_package_throttle_count.attr,
                                              thermal_throttle_attr_group.name);

        return err;
}

static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
{
        sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
}

/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static __cpuinit int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
                              unsigned long action,
                              void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;
        int err = 0;

        sys_dev = get_cpu_sysdev(cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&therm_cpu_lock);
                err = thermal_throttle_add_dev(sys_dev);
                mutex_unlock(&therm_cpu_lock);
                WARN_ON(err);
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                mutex_lock(&therm_cpu_lock);
                thermal_throttle_remove_dev(sys_dev);
                mutex_unlock(&therm_cpu_lock);
                break;
        }
        return notifier_from_errno(err);
}

static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
        .notifier_call = thermal_throttle_cpu_callback,
};
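
/*
 * Runs as a device_initcall, i.e. after intel_init_thermal() has had a
 * chance to set therm_throt_en on the boot CPU; if thermal monitoring was
 * never enabled there, the sysfs interface is simply not created.
 */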
static __init int thermal_throttle_init_device(void)
{
        unsigned int cpu = 0;
        int err;

        if (!atomic_read(&therm_throt_en))
                return 0;

        register_hotcpu_notifier(&thermal_throttle_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&therm_cpu_lock);
#endif
        /* connect live CPUs to sysfs */
        for_each_online_cpu(cpu) {
                err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
                WARN_ON(err);
        }
#ifdef CONFIG_HOTPLUG_CPU
        mutex_unlock(&therm_cpu_lock);
#endif

        return 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */

/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
        __u64 msr_val;
        struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

        rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
        if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
                                CORE_LEVEL) != 0)
                mce_log_therm_throt_event(msr_val);

        if (cpu_has(c, X86_FEATURE_PTS)) {
                rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
                if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
                                        PACKAGE_LEVEL) != 0)
                        /*
                         * Set the most significant bit to notify mce log
                         * that this thermal event is a package-level event.
                         * This is a temporary solution and may be changed
                         * in the future with the mce log infrastructure.
                         */
                        mce_log_therm_throt_event(((__u64)1 << 63) | msr_val);
        }
}

static void unexpected_thermal_interrupt(void)
{
        printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
                        smp_processor_id());
        add_taint(TAINT_MACHINE_CHECK);
}

static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;

asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
        exit_idle();
        irq_enter();
        inc_irq_stat(irq_thermal_count);
        smp_thermal_vector();
        irq_exit();
        /* Ack only at the end to avoid potential reentry */
        ack_APIC_irq();
}

/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
        if (!cpu_has_apic)
                return 0;
        if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
                return 0;
        return 1;
}
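
/*
 * Here X86_FEATURE_ACPI is the CPUID "acpi" flag (thermal monitor and
 * software-controlled clock facilities via MSRs), and X86_FEATURE_ACC is
 * automatic clock control, reported as "tm" in /proc/cpuinfo.
 */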

void __init mcheck_intel_therm_init(void)
{
        /*
         * This function is only called on the boot CPU. Save the initial
         * thermal LVT value on the BSP and use it to restore each AP's
         * thermal LVT entry to what the BIOS programmed, later on.
         */
        if (intel_thermal_supported(&boot_cpu_data))
                lvtthmr_init = apic_read(APIC_LVTTHMR);
}
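
/*
 * The saved lvtthmr_init is consumed by intel_init_thermal() below, which
 * each CPU runs during bring-up before deciding whether SMM/BIOS already
 * owns the thermal interrupt.
 */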

void intel_init_thermal(struct cpuinfo_x86 *c)
{
        unsigned int cpu = smp_processor_id();
        int tm2 = 0;
        u32 l, h;

        if (!intel_thermal_supported(c))
                return;

        /*
         * First check if it's enabled already, in which case there might
         * be some SMM goo which handles it, so we can't even put a handler
         * since it might be delivered via SMI already:
         */
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);

        /*
         * The initial value of thermal LVT entries on all APs always reads
         * 0x10000 because APs are woken up by the BSP issuing an
         * INIT-SIPI-SIPI sequence to them and LVT registers are reset to 0s
         * except for the mask bits, which are set to 1s when APs receive the
         * INIT IPI. Always restore the value that the BIOS programmed on the
         * AP, based on the BSP's info we saved, since the BIOS sets the same
         * value for all threads/cores.
         */
        apic_write(APIC_LVTTHMR, lvtthmr_init);
        h = lvtthmr_init;

        if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
                printk(KERN_DEBUG
                       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
                return;
        }

        /* Check whether a vector already exists */
        if (h & APIC_VECTOR_MASK) {
                printk(KERN_DEBUG
                       "CPU%d: Thermal LVT vector (%#x) already installed\n",
                       cpu, (h & APIC_VECTOR_MASK));
                return;
        }

        /* early Pentium M models use a different method for enabling TM2 */
        if (cpu_has(c, X86_FEATURE_TM2)) {
                if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
                        rdmsr(MSR_THERM2_CTL, l, h);
                        if (l & MSR_THERM2_CTL_TM_SELECT)
                                tm2 = 1;
                } else if (l & MSR_IA32_MISC_ENABLE_TM2) {
                        /* 'l' still holds MSR_IA32_MISC_ENABLE from above */
                        tm2 = 1;
                }
        }

        /* We'll mask the thermal vector in the lapic till we're ready: */
        h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
        apic_write(APIC_LVTTHMR, h);

        rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
        wrmsr(MSR_IA32_THERM_INTERRUPT,
              l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

        if (cpu_has(c, X86_FEATURE_PTS)) {
                rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
                wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                      l | (PACKAGE_THERM_INT_LOW_ENABLE
                           | PACKAGE_THERM_INT_HIGH_ENABLE), h);
        }

        smp_thermal_vector = intel_thermal_interrupt;

        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
        wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

        /* Unmask the thermal vector: */
        l = apic_read(APIC_LVTTHMR);
        apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

        printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
                    tm2 ? "TM2" : "TM1");

        /* enable thermal throttle processing */
        atomic_set(&therm_throt_en, 1);
}