
/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 * Inspired by Ross Biro's and Al Borchers' counter code.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

/* How long to wait between reporting thermal events: 300 * HZ jiffies, i.e. 300 seconds */
#define CHECK_INTERVAL          (300 * HZ)

#define THERMAL_THROTTLING_EVENT        0
#define POWER_LIMIT_EVENT               1
/*
 * Current thermal event state:
 */
struct _thermal_state {
        bool            new_event;
        int             event;
        u64             next_check;
        unsigned long   count;
        unsigned long   last_count;
};

struct thermal_state {
        struct _thermal_state core_throttle;
        struct _thermal_state core_power_limit;
        struct _thermal_state package_throttle;
        struct _thermal_state package_power_limit;
        struct _thermal_state core_thresh0;
        struct _thermal_state core_thresh1;
};

/* Callback to handle core threshold interrupts */
int (*platform_thermal_notify)(__u64 msr_val);
EXPORT_SYMBOL(platform_thermal_notify);
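
/*
 * A platform driver can catch core threshold interrupts by assigning this
 * hook. A minimal sketch (hypothetical driver code, not part of this file;
 * my_pkg_temp_notify is an invented name):
 *
 *      static int my_pkg_temp_notify(__u64 msr_val)
 *      {
 *              pr_info("thermal threshold crossed, THERM_STATUS=%llx\n",
 *                      msr_val);
 *              return 0;
 *      }
 *
 *      platform_thermal_notify = my_pkg_temp_notify;
 */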
static DEFINE_PER_CPU(struct thermal_state, thermal_state);

static atomic_t therm_throt_en = ATOMIC_INIT(0);

static u32 lvtthmr_init __read_mostly;

#ifdef CONFIG_SYSFS
#define define_therm_throt_device_one_ro(_name)                         \
        static DEVICE_ATTR(_name, 0444,                                 \
                           therm_throt_device_show_##_name,             \
                           NULL)

#define define_therm_throt_device_show_func(event, name)                \
                                                                        \
static ssize_t therm_throt_device_show_##event##_##name(                \
                        struct device *dev,                             \
                        struct device_attribute *attr,                  \
                        char *buf)                                      \
{                                                                       \
        unsigned int cpu = dev->id;                                     \
        ssize_t ret;                                                    \
                                                                        \
        preempt_disable();      /* CPU hotplug */                       \
        if (cpu_online(cpu)) {                                          \
                ret = sprintf(buf, "%lu\n",                             \
                              per_cpu(thermal_state, cpu).event.name);  \
        } else                                                          \
                ret = 0;                                                \
        preempt_enable();                                               \
                                                                        \
        return ret;                                                     \
}
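
/*
 * Each pair of invocations below generates one show function and one
 * read-only device attribute. For example,
 *
 *      define_therm_throt_device_show_func(core_throttle, count);
 *      define_therm_throt_device_one_ro(core_throttle_count);
 *
 * expands to therm_throt_device_show_core_throttle_count(), which prints
 * per_cpu(thermal_state, cpu).core_throttle.count, plus the matching
 * dev_attr_core_throttle_count used in the attribute group below.
 */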
define_therm_throt_device_show_func(core_throttle, count);
define_therm_throt_device_one_ro(core_throttle_count);

define_therm_throt_device_show_func(core_power_limit, count);
define_therm_throt_device_one_ro(core_power_limit_count);

define_therm_throt_device_show_func(package_throttle, count);
define_therm_throt_device_one_ro(package_throttle_count);

define_therm_throt_device_show_func(package_power_limit, count);
define_therm_throt_device_one_ro(package_power_limit_count);
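
/*
 * The counters surface under each CPU device; assuming the usual sysfs
 * layout, for example (value illustrative):
 *
 *      $ cat /sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
 *      12
 *
 * Only core_throttle_count is registered unconditionally; the power-limit
 * and package files are added per CPU feature in thermal_throttle_add_dev().
 */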
static struct attribute *thermal_throttle_attrs[] = {
        &dev_attr_core_throttle_count.attr,
        NULL
};

static struct attribute_group thermal_attr_group = {
        .attrs  = thermal_throttle_attrs,
        .name   = "thermal_throttle"
};
#endif /* CONFIG_SYSFS */

#define CORE_LEVEL      0
#define PACKAGE_LEVEL   1
/**
 * therm_throt_process - Process thermal throttling event from interrupt
 * @new_event: Whether the condition is currently active (boolean), since the
 *             thermal interrupt normally gets called both when the thermal
 *             event begins and once the event has ended.
 * @event: THERMAL_THROTTLING_EVENT or POWER_LIMIT_EVENT
 * @level: CORE_LEVEL or PACKAGE_LEVEL
 *
 * This function is called from the thermal interrupt handler (the APIC
 * IRQ itself is only acknowledged once the handler returns).
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be further logged, i.e. still in
 *              "timeout" from previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
static int therm_throt_process(bool new_event, int event, int level)
{
        struct _thermal_state *state;
        unsigned int this_cpu = smp_processor_id();
        bool old_event;
        u64 now;
        struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);

        now = get_jiffies_64();
        if (level == CORE_LEVEL) {
                if (event == THERMAL_THROTTLING_EVENT)
                        state = &pstate->core_throttle;
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->core_power_limit;
                else
                        return 0;
        } else if (level == PACKAGE_LEVEL) {
                if (event == THERMAL_THROTTLING_EVENT)
                        state = &pstate->package_throttle;
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->package_power_limit;
                else
                        return 0;
        } else
                return 0;

        old_event = state->new_event;
        state->new_event = new_event;

        if (new_event)
                state->count++;

        if (time_before64(now, state->next_check) &&
                        state->count != state->last_count)
                return 0;

        state->next_check = now + CHECK_INTERVAL;
        state->last_count = state->count;

        /* if we just entered the thermal event */
        if (new_event) {
                if (event == THERMAL_THROTTLING_EVENT)
                        printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
                                this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package",
                                state->count);
                return 1;
        }
        if (old_event) {
                if (event == THERMAL_THROTTLING_EVENT)
                        printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
                                this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package");
                return 1;
        }

        return 0;
}
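
/*
 * Given the format strings above, a throttling episode on core 2 would
 * look roughly like this in the syslog (values illustrative):
 *
 *      CPU2: Core temperature above threshold, cpu clock throttled (total events = 437)
 *      CPU2: Core temperature/speed normal
 *
 * with repeat messages suppressed for CHECK_INTERVAL once one has been
 * emitted.
 */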
static int thresh_event_valid(int event)
{
        struct _thermal_state *state;
        unsigned int this_cpu = smp_processor_id();
        struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
        u64 now = get_jiffies_64();

        state = (event == 0) ? &pstate->core_thresh0 : &pstate->core_thresh1;

        if (time_before64(now, state->next_check))
                return 0;

        state->next_check = now + CHECK_INTERVAL;

        return 1;
}
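
/*
 * Power limit notification (PLN) interrupts are disabled by default: the
 * MSR setup in intel_init_thermal() explicitly clears THERM_INT_PLN_ENABLE
 * unless the user opts in with int_pln_enable on the kernel command line.
 */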
static bool int_pln_enable;

static int __init int_pln_enable_setup(char *s)
{
        int_pln_enable = true;

        return 1;
}
__setup("int_pln_enable", int_pln_enable_setup);
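
/*
 * Example (assuming a GRUB-style bootloader passing a kernel command line):
 *
 *      linux /vmlinuz ... int_pln_enable
 *
 * enables delivery of power limit notification interrupts.
 */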
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct device *dev,
                                              unsigned int cpu)
{
        int err;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
        if (err)
                return err;

        if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                err = sysfs_add_file_to_group(&dev->kobj,
                                              &dev_attr_core_power_limit_count.attr,
                                              thermal_attr_group.name);
        if (cpu_has(c, X86_FEATURE_PTS)) {
                err = sysfs_add_file_to_group(&dev->kobj,
                                              &dev_attr_package_throttle_count.attr,
                                              thermal_attr_group.name);
                if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                        err = sysfs_add_file_to_group(&dev->kobj,
                                        &dev_attr_package_power_limit_count.attr,
                                        thermal_attr_group.name);
        }

        return err;
}
static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
{
        sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}

/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static __cpuinit int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
                              unsigned long action,
                              void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev;
        int err = 0;

        dev = get_cpu_device(cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                mutex_lock(&therm_cpu_lock);
                err = thermal_throttle_add_dev(dev, cpu);
                mutex_unlock(&therm_cpu_lock);
                WARN_ON(err);
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                mutex_lock(&therm_cpu_lock);
                thermal_throttle_remove_dev(dev);
                mutex_unlock(&therm_cpu_lock);
                break;
        }
        return notifier_from_errno(err);
}

static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
        .notifier_call = thermal_throttle_cpu_callback,
};
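
/*
 * Runs as a device_initcall(), i.e. after intel_init_thermal() has had a
 * chance to run on the boot CPU; if thermal interrupts were never enabled
 * there, therm_throt_en stays 0 and no sysfs interface is created.
 */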
static __init int thermal_throttle_init_device(void)
{
        unsigned int cpu = 0;
        int err;

        if (!atomic_read(&therm_throt_en))
                return 0;

        register_hotcpu_notifier(&thermal_throttle_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&therm_cpu_lock);
#endif
        /* connect live CPUs to sysfs */
        for_each_online_cpu(cpu) {
                err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
                WARN_ON(err);
        }
#ifdef CONFIG_HOTPLUG_CPU
        mutex_unlock(&therm_cpu_lock);
#endif

        return 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */
static void notify_thresholds(__u64 msr_val)
{
        /*
         * Check whether the interrupt handler is defined;
         * otherwise simply return.
         */
        if (!platform_thermal_notify)
                return;

        /* lower threshold reached */
        if ((msr_val & THERM_LOG_THRESHOLD0) && thresh_event_valid(0))
                platform_thermal_notify(msr_val);
        /* higher threshold reached */
        if ((msr_val & THERM_LOG_THRESHOLD1) && thresh_event_valid(1))
                platform_thermal_notify(msr_val);
}
/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
        __u64 msr_val;

        rdmsrl(MSR_IA32_THERM_STATUS, msr_val);

        /* Check for violation of core thermal thresholds */
        notify_thresholds(msr_val);

        if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
                                THERMAL_THROTTLING_EVENT,
                                CORE_LEVEL) != 0)
                mce_log_therm_throt_event(msr_val);

        if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
                therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
                                        POWER_LIMIT_EVENT,
                                        CORE_LEVEL);

        if (this_cpu_has(X86_FEATURE_PTS)) {
                rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
                therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
                                        THERMAL_THROTTLING_EVENT,
                                        PACKAGE_LEVEL);
                if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
                        therm_throt_process(msr_val &
                                        PACKAGE_THERM_STATUS_POWER_LIMIT,
                                        POWER_LIMIT_EVENT,
                                        PACKAGE_LEVEL);
        }
}
static void unexpected_thermal_interrupt(void)
{
        printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
                smp_processor_id());
}

static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;

asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
        irq_enter();
        exit_idle();
        inc_irq_stat(irq_thermal_count);
        smp_thermal_vector();
        irq_exit();
        /* Ack only at the end to avoid potential reentry */
        ack_APIC_irq();
}
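
/*
 * Note on the feature checks below: going by the kernel's CPUID feature-bit
 * definitions, X86_FEATURE_ACPI is the "ACPI via MSR" bit (the THERM_*
 * MSRs exist) and X86_FEATURE_ACC is automatic clock control, i.e. TM1.
 */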
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
        if (!cpu_has_apic)
                return 0;
        if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
                return 0;
        return 1;
}
void __init mcheck_intel_therm_init(void)
{
        /*
         * This function is only called on the boot CPU. Save the init thermal
         * LVT value on the BSP and use that value to later restore the APs'
         * thermal LVT entries that the BIOS programmed.
         */
        if (intel_thermal_supported(&boot_cpu_data))
                lvtthmr_init = apic_read(APIC_LVTTHMR);
}
void intel_init_thermal(struct cpuinfo_x86 *c)
{
        unsigned int cpu = smp_processor_id();
        int tm2 = 0;
        u32 l, h;

        if (!intel_thermal_supported(c))
                return;

        /*
         * First check if it's enabled already, in which case there might
         * be some SMM goo which handles it, so we can't even put a handler
         * since it might be delivered via SMI already:
         */
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);

        h = lvtthmr_init;
        /*
         * The initial value of thermal LVT entries on all APs always reads
         * 0x10000 because APs are woken up by the BSP issuing an
         * INIT-SIPI-SIPI sequence to them and LVT registers are reset to 0s
         * except for the mask bits, which are set to 1s when APs receive the
         * INIT IPI. If the BIOS takes over the thermal interrupt and sets its
         * interrupt delivery mode to SMI (not fixed), this restores the value
         * the BIOS programmed on the AP, based on the BSP's info we saved,
         * since the BIOS always sets the same value for all threads/cores.
         */
        if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
                apic_write(APIC_LVTTHMR, lvtthmr_init);

        if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
                printk(KERN_DEBUG
                       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
                return;
        }

        /* Check whether a vector already exists */
        if (h & APIC_VECTOR_MASK) {
                printk(KERN_DEBUG
                       "CPU%d: Thermal LVT vector (%#x) already installed\n",
                       cpu, (h & APIC_VECTOR_MASK));
                return;
        }

        /* early Pentium M models use a different method for enabling TM2 */
        if (cpu_has(c, X86_FEATURE_TM2)) {
                if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
                        rdmsr(MSR_THERM2_CTL, l, h);
                        if (l & MSR_THERM2_CTL_TM_SELECT)
                                tm2 = 1;
                } else if (l & MSR_IA32_MISC_ENABLE_TM2)
                        tm2 = 1;
        }

        /* We'll mask the thermal vector in the lapic till we're ready: */
        h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
        apic_write(APIC_LVTTHMR, h);

        rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
        if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                        (l | (THERM_INT_LOW_ENABLE
                        | THERM_INT_HIGH_ENABLE)) & ~THERM_INT_PLN_ENABLE, h);
        else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                        l | (THERM_INT_LOW_ENABLE
                        | THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
        else
                wrmsr(MSR_IA32_THERM_INTERRUPT,
                        l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

        if (cpu_has(c, X86_FEATURE_PTS)) {
                rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
                if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                                (l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE))
                                & ~PACKAGE_THERM_INT_PLN_ENABLE, h);
                else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                                l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE
                                | PACKAGE_THERM_INT_PLN_ENABLE), h);
                else
                        wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
                                l | (PACKAGE_THERM_INT_LOW_ENABLE
                                | PACKAGE_THERM_INT_HIGH_ENABLE), h);
        }

        smp_thermal_vector = intel_thermal_interrupt;

        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
        wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

        /* Unmask the thermal vector: */
        l = apic_read(APIC_LVTTHMR);
        apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

        printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
                    tm2 ? "TM2" : "TM1");

        /* enable thermal throttle processing */
        atomic_set(&therm_throt_en, 1);
}