therm_throt.c

/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 * Inspired by Ross Biro's and Al Borchers' counter code.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

/* How long to wait between reporting thermal events (300 * HZ jiffies = 300 seconds) */
#define CHECK_INTERVAL (300 * HZ)

#define THERMAL_THROTTLING_EVENT 0
#define POWER_LIMIT_EVENT 1

/*
 * Current thermal event state:
 */
struct _thermal_state {
    bool            new_event;      /* most recently reported hardware state */
    int             event;
    u64             next_check;     /* jiffies64 time of next permitted report */
    unsigned long   count;          /* total events seen */
    unsigned long   last_count;     /* value of count at the last report */
};

struct thermal_state {
    struct _thermal_state core_throttle;
    struct _thermal_state core_power_limit;
    struct _thermal_state package_throttle;
    struct _thermal_state package_power_limit;
};
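/*
 * Every logical CPU carries its own copy of this state; the package_*
 * entries are each CPU's view of the package-level events delivered to it.
 */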
static DEFINE_PER_CPU(struct thermal_state, thermal_state);

/* Set once intel_init_thermal() has enabled thermal interrupt handling */
static atomic_t therm_throt_en = ATOMIC_INIT(0);

/* Thermal LVT value saved on the boot CPU by mcheck_intel_therm_init() */
static u32 lvtthmr_init __read_mostly;
#ifdef CONFIG_SYSFS
#define define_therm_throt_sysdev_one_ro(_name)                         \
    static SYSDEV_ATTR(_name, 0444,                                     \
                       therm_throt_sysdev_show_##_name,                 \
                       NULL)

#define define_therm_throt_sysdev_show_func(event, name)                \
                                                                        \
static ssize_t therm_throt_sysdev_show_##event##_##name(                \
            struct sys_device *dev,                                     \
            struct sysdev_attribute *attr,                              \
            char *buf)                                                  \
{                                                                       \
    unsigned int cpu = dev->id;                                         \
    ssize_t ret;                                                        \
                                                                        \
    preempt_disable();      /* CPU hotplug */                           \
    if (cpu_online(cpu)) {                                              \
        ret = sprintf(buf, "%lu\n",                                     \
                      per_cpu(thermal_state, cpu).event.name);          \
    } else                                                              \
        ret = 0;                                                        \
    preempt_enable();                                                   \
                                                                        \
    return ret;                                                         \
}
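
/*
 * For reference, define_therm_throt_sysdev_show_func(core_throttle, count)
 * expands (roughly) to:
 *
 *   static ssize_t therm_throt_sysdev_show_core_throttle_count(
 *           struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
 *   { ... sprintf(buf, "%lu\n",
 *                 per_cpu(thermal_state, cpu).core_throttle.count); ... }
 *
 * and define_therm_throt_sysdev_one_ro(core_throttle_count) then wraps it in
 * a read-only (0444) sysdev attribute named attr_core_throttle_count.
 */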
define_therm_throt_sysdev_show_func(core_throttle, count);
define_therm_throt_sysdev_one_ro(core_throttle_count);

define_therm_throt_sysdev_show_func(core_power_limit, count);
define_therm_throt_sysdev_one_ro(core_power_limit_count);

define_therm_throt_sysdev_show_func(package_throttle, count);
define_therm_throt_sysdev_one_ro(package_throttle_count);

define_therm_throt_sysdev_show_func(package_power_limit, count);
define_therm_throt_sysdev_one_ro(package_power_limit_count);

/*
 * Only core_throttle_count is present unconditionally; the PLN and PTS
 * counters are added per CPU in thermal_throttle_add_dev() when the
 * hardware supports them.
 */
static struct attribute *thermal_throttle_attrs[] = {
    &attr_core_throttle_count.attr,
    NULL
};

static struct attribute_group thermal_attr_group = {
    .attrs = thermal_throttle_attrs,
    .name  = "thermal_throttle"
};
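
/*
 * The resulting files typically appear under
 * /sys/devices/system/cpu/cpuN/thermal_throttle/, e.g. (illustrative):
 *
 *   $ cat /sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
 *   12
 *
 * (path assumes the sysdev-based CPU layout used by this kernel version).
 */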
#endif /* CONFIG_SYSFS */

#define CORE_LEVEL    0
#define PACKAGE_LEVEL 1
/***
 * therm_throt_process - Process thermal throttling event from interrupt
 * @new_event: Whether the condition is currently asserted (boolean), since
 *             the thermal interrupt normally fires both when the thermal
 *             event begins and once it has ended.
 * @event: THERMAL_THROTTLING_EVENT or POWER_LIMIT_EVENT
 * @level: CORE_LEVEL or PACKAGE_LEVEL
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be further logged, i.e. still in
 *              "timeout" from previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
static int therm_throt_process(bool new_event, int event, int level)
{
    struct _thermal_state *state;
    unsigned int this_cpu = smp_processor_id();
    bool old_event;
    u64 now;
    struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);

    now = get_jiffies_64();
    if (level == CORE_LEVEL) {
        if (event == THERMAL_THROTTLING_EVENT)
            state = &pstate->core_throttle;
        else if (event == POWER_LIMIT_EVENT)
            state = &pstate->core_power_limit;
        else
            return 0;
    } else if (level == PACKAGE_LEVEL) {
        if (event == THERMAL_THROTTLING_EVENT)
            state = &pstate->package_throttle;
        else if (event == POWER_LIMIT_EVENT)
            state = &pstate->package_power_limit;
        else
            return 0;
    } else
        return 0;

    old_event = state->new_event;
    state->new_event = new_event;

    if (new_event)
        state->count++;

    /* Rate-limit: stay quiet while still inside the reporting window if
     * new events have arrived since the last report. */
    if (time_before64(now, state->next_check) &&
        state->count != state->last_count)
        return 0;

    state->next_check = now + CHECK_INTERVAL;
    state->last_count = state->count;

    /* if we just entered the thermal event */
    if (new_event) {
        if (event == THERMAL_THROTTLING_EVENT)
            printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
                   this_cpu,
                   level == CORE_LEVEL ? "Core" : "Package",
                   state->count);
        else
            printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n",
                   this_cpu,
                   level == CORE_LEVEL ? "Core" : "Package",
                   state->count);

        add_taint(TAINT_MACHINE_CHECK);
        return 1;
    }
    if (old_event) {
        if (event == THERMAL_THROTTLING_EVENT)
            printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
                   this_cpu,
                   level == CORE_LEVEL ? "Core" : "Package");
        else
            printk(KERN_INFO "CPU%d: %s power limit normal\n",
                   this_cpu,
                   level == CORE_LEVEL ? "Core" : "Package");
        return 1;
    }

    return 0;
}
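
/*
 * Example (illustrative): CHECK_INTERVAL is 300 seconds' worth of jiffies,
 * so a CPU that throttles continuously generates at most one KERN_CRIT line
 * per five-minute window, while its sysfs *_count file keeps incrementing
 * for every hardware event.
 */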
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
                                              unsigned int cpu)
{
    int err;
    struct cpuinfo_x86 *c = &cpu_data(cpu);

    err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
    if (err)
        return err;

    /* Note: each assignment below overwrites err from the previous add,
     * so only the last failure is reported to the caller. */
    if (cpu_has(c, X86_FEATURE_PLN))
        err = sysfs_add_file_to_group(&sys_dev->kobj,
                                      &attr_core_power_limit_count.attr,
                                      thermal_attr_group.name);
    if (cpu_has(c, X86_FEATURE_PTS))
        err = sysfs_add_file_to_group(&sys_dev->kobj,
                                      &attr_package_throttle_count.attr,
                                      thermal_attr_group.name);
    if (cpu_has(c, X86_FEATURE_PLN))
        err = sysfs_add_file_to_group(&sys_dev->kobj,
                                      &attr_package_power_limit_count.attr,
                                      thermal_attr_group.name);

    return err;
}

static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
{
    sysfs_remove_group(&sys_dev->kobj, &thermal_attr_group);
}
/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static __cpuinit int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
                              unsigned long action,
                              void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    struct sys_device *sys_dev;
    int err = 0;

    sys_dev = get_cpu_sysdev(cpu);

    switch (action) {
    case CPU_UP_PREPARE:
    case CPU_UP_PREPARE_FROZEN:
        mutex_lock(&therm_cpu_lock);
        err = thermal_throttle_add_dev(sys_dev, cpu);
        mutex_unlock(&therm_cpu_lock);
        WARN_ON(err);
        break;
    case CPU_UP_CANCELED:
    case CPU_UP_CANCELED_FROZEN:
    case CPU_DEAD:
    case CPU_DEAD_FROZEN:
        mutex_lock(&therm_cpu_lock);
        thermal_throttle_remove_dev(sys_dev);
        mutex_unlock(&therm_cpu_lock);
        break;
    }
    return notifier_from_errno(err);
}

static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
    .notifier_call = thermal_throttle_cpu_callback,
};
static __init int thermal_throttle_init_device(void)
{
    unsigned int cpu = 0;
    int err;

    if (!atomic_read(&therm_throt_en))
        return 0;

    register_hotcpu_notifier(&thermal_throttle_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
    mutex_lock(&therm_cpu_lock);
#endif
    /* connect live CPUs to sysfs */
    for_each_online_cpu(cpu) {
        err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
        WARN_ON(err);
    }
#ifdef CONFIG_HOTPLUG_CPU
    mutex_unlock(&therm_cpu_lock);
#endif

    return 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */
/*
 * Set the two most significant bits to tell the mce log which thermal
 * event type this is.
 * This is a temporary solution; it may change in the future along with
 * the mce log infrastructure.
 */
#define CORE_THROTTLED       (0)
#define CORE_POWER_LIMIT     ((__u64)1 << 62)
#define PACKAGE_THROTTLED    ((__u64)2 << 62)
#define PACKAGE_POWER_LIMIT  ((__u64)3 << 62)
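
/*
 * Resulting encoding in bits 63:62 of the logged value:
 *
 *   00 - core throttled          01 - core power limit
 *   10 - package throttled       11 - package power limit
 *
 * The low bits carry the raw IA32_(PACKAGE_)THERM_STATUS MSR contents.
 */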
/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
    __u64 msr_val;
    struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

    rdmsrl(MSR_IA32_THERM_STATUS, msr_val);

    if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
                            THERMAL_THROTTLING_EVENT,
                            CORE_LEVEL) != 0)
        mce_log_therm_throt_event(CORE_THROTTLED | msr_val);

    if (cpu_has(c, X86_FEATURE_PLN))
        if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
                                POWER_LIMIT_EVENT,
                                CORE_LEVEL) != 0)
            mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val);

    if (cpu_has(c, X86_FEATURE_PTS)) {
        rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
        if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
                                THERMAL_THROTTLING_EVENT,
                                PACKAGE_LEVEL) != 0)
            mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val);
        if (cpu_has(c, X86_FEATURE_PLN))
            if (therm_throt_process(msr_val &
                                    PACKAGE_THERM_STATUS_POWER_LIMIT,
                                    POWER_LIMIT_EVENT,
                                    PACKAGE_LEVEL) != 0)
                mce_log_therm_throt_event(PACKAGE_POWER_LIMIT
                                          | msr_val);
    }
}
static void unexpected_thermal_interrupt(void)
{
    printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
           smp_processor_id());
    add_taint(TAINT_MACHINE_CHECK);
}

static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;

asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
    exit_idle();
    irq_enter();
    inc_irq_stat(irq_thermal_count);
    smp_thermal_vector();
    irq_exit();
    /* Ack only at the end to avoid potential reentry */
    ack_APIC_irq();
}
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
    if (!cpu_has_apic)
        return 0;
    if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
        return 0;
    return 1;
}
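
/*
 * X86_FEATURE_ACPI here is the CPUID flag for the thermal-monitor MSRs
 * (not the ACPI subsystem), and X86_FEATURE_ACC is automatic clock
 * control, i.e. TM1-style clock modulation.
 */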
void __init mcheck_intel_therm_init(void)
{
    /*
     * This function is only called on the boot CPU. Save the initial
     * thermal LVT value on the BSP and use it later to restore the
     * BIOS-programmed value on the APs.
     */
    if (intel_thermal_supported(&boot_cpu_data))
        lvtthmr_init = apic_read(APIC_LVTTHMR);
}
void intel_init_thermal(struct cpuinfo_x86 *c)
{
    unsigned int cpu = smp_processor_id();
    int tm2 = 0;
    u32 l, h;

    if (!intel_thermal_supported(c))
        return;

    /*
     * First check if it's enabled already, in which case there might
     * be some SMM goo which handles it, so we can't even put a handler
     * since it might be delivered via SMI already:
     */
    rdmsr(MSR_IA32_MISC_ENABLE, l, h);

    /*
     * The initial value of the thermal LVT entries on all APs always reads
     * 0x10000 because APs are woken up by the BSP issuing an INIT-SIPI-SIPI
     * sequence to them and the LVT registers are reset to 0s except for
     * the mask bits, which are set to 1s when APs receive the INIT IPI.
     * Always restore the value that BIOS programmed on the AP, based on the
     * BSP's copy we saved, since BIOS sets the same value for all
     * threads/cores.
     */
    apic_write(APIC_LVTTHMR, lvtthmr_init);
    h = lvtthmr_init;

    if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
        printk(KERN_DEBUG
               "CPU%d: Thermal monitoring handled by SMI\n", cpu);
        return;
    }

    /* Check whether a vector already exists */
    if (h & APIC_VECTOR_MASK) {
        printk(KERN_DEBUG
               "CPU%d: Thermal LVT vector (%#x) already installed\n",
               cpu, (h & APIC_VECTOR_MASK));
        return;
    }

    /* early Pentium M models use a different method for enabling TM2 */
    if (cpu_has(c, X86_FEATURE_TM2)) {
        if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
            rdmsr(MSR_THERM2_CTL, l, h);
            if (l & MSR_THERM2_CTL_TM_SELECT)
                tm2 = 1;
        } else if (l & MSR_IA32_MISC_ENABLE_TM2) /* l: MISC_ENABLE from above */
            tm2 = 1;
    }

    /* We'll mask the thermal vector in the lapic till we're ready: */
    h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
    apic_write(APIC_LVTTHMR, h);

    rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
    if (cpu_has(c, X86_FEATURE_PLN))
        wrmsr(MSR_IA32_THERM_INTERRUPT,
              l | (THERM_INT_LOW_ENABLE
                   | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
    else
        wrmsr(MSR_IA32_THERM_INTERRUPT,
              l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

    if (cpu_has(c, X86_FEATURE_PTS)) {
        rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
        if (cpu_has(c, X86_FEATURE_PLN))
            wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                  l | (PACKAGE_THERM_INT_LOW_ENABLE
                       | PACKAGE_THERM_INT_HIGH_ENABLE
                       | PACKAGE_THERM_INT_PLN_ENABLE), h);
        else
            wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                  l | (PACKAGE_THERM_INT_LOW_ENABLE
                       | PACKAGE_THERM_INT_HIGH_ENABLE), h);
    }

    smp_thermal_vector = intel_thermal_interrupt;

    rdmsr(MSR_IA32_MISC_ENABLE, l, h);
    wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

    /* Unmask the thermal vector: */
    l = apic_read(APIC_LVTTHMR);
    apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

    printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
                tm2 ? "TM2" : "TM1");

    /* enable thermal throttle processing */
    atomic_set(&therm_throt_en, 1);
}