/*
 * watchdog.c - detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_user_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*  */
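
/*
 * Example usage of the boot parameters handled above:
 *   nmi_watchdog=panic softlockup_panic=1  -> panic on either lockup type
 *   nowatchdog (or nmi_watchdog=0)         -> disable the detectors entirely
 */
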
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns ~= 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return local_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
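
/*
 * Worked example of the shift above: local_clock() == 5,368,709,120 ns
 * (~5.37 s) >> 30 yields 5, i.e. the timestamp advances once per ~1.074 s.
 * Close enough to one tick per second for lockup detection.
 */
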
static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
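
/*
 * With the default watchdog_thresh of 10 s, the soft threshold is 20 s and
 * sample_period becomes 20 * (10^9 / 5) ns = 4 * 10^9 ns, i.e. the hrtimer
 * fires every 4 seconds on each cpu.
 */
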
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlock check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	if (watchdog_user_enabled) {
		unsigned cpu;

		for_each_present_cpu(cpu) {
			if (per_cpu(watchdog_nmi_touch, cpu) != true)
				per_cpu(watchdog_nmi_touch, cpu) = true;
		}
	}
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
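
/*
 * The attribute above requests a pinned, per-cpu cycle counter; its
 * sample_period is filled in by watchdog_nmi_enable() via
 * hw_nmi_get_sample_period(), sized so the counter overflows (and fires
 * the NMI callback below) roughly once per watchdog_thresh seconds.
 */
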
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflowed.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
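
/*
 * To summarize the division of labor in the timer function above: the
 * per-cpu hrtimer both increments hrtimer_interrupts (proving to the NMI
 * handler that timer interrupts still run, i.e. no hard lockup) and wakes
 * the per-cpu watchdog thread (which must get scheduled to refresh
 * watchdog_touch_ts, i.e. no soft lockup).
 */
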
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp.  If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}
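
/*
 * Note the pairing with watchdog_should_run() above: the thread is woken on
 * every hrtimer tick, but only does work when hrtimer_interrupts has moved
 * past the count it recorded last time, i.e. at most once per sample period.
 */
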
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
						 watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			   cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
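
/*
 * smpboot creates one "watchdog/N" thread per cpu from this descriptor;
 * park/unpark are invoked around cpu hotplug, so a cpu going offline
 * automatically stops its hrtimer and releases its perf counter.
 */
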
static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart hrtimer if it is currently executing
	 * because it will reprogram itself with the new period now.
	 * We should never see it unqueued here because we are running per-cpu
	 * with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
			      HRTIMER_MODE_REL_PINNED);
}

static void update_timers(int cpu)
{
	struct call_single_data data = {.func = restart_watchdog_hrtimer};
	/*
	 * Make sure that perf event counter will adapt to a new
	 * sampling period. Updating the sampling period directly would
	 * be much nicer but we do not have an API for that now so
	 * let's use a big hammer.
	 * Hrtimer will adopt the new period on the next tick but this
	 * might be late already so we have to restart the timer as well.
	 */
	watchdog_nmi_disable(cpu);
	__smp_call_function_single(cpu, &data, 1);
	watchdog_nmi_enable(cpu);
}

static void update_timers_all_cpus(void)
{
	int cpu;

	get_online_cpus();
	preempt_disable();
	for_each_online_cpu(cpu)
		update_timers(cpu);
	preempt_enable();
	put_online_cpus();
}

static int watchdog_enable_all_cpus(bool sample_period_changed)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread(&watchdog_threads);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else if (sample_period_changed) {
		update_timers_all_cpus();
	}

	return err;
}

/* prepare/enable/disable routines */

/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
 */
int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old_thresh, old_enabled;
	static DEFINE_MUTEX(watchdog_proc_mutex);

	mutex_lock(&watchdog_proc_mutex);
	old_thresh = ACCESS_ONCE(watchdog_thresh);
	old_enabled = ACCESS_ONCE(watchdog_user_enabled);

	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (err || !write)
		goto out;

	set_sample_period();
	/*
	 * Watchdog threads shouldn't be enabled if they are
	 * disabled. The 'watchdog_running' variable check in
	 * watchdog_*_all_cpus() function takes care of this.
	 */
	if (watchdog_user_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
	else
		watchdog_disable_all_cpus();

	/* Restore old values on failure */
	if (err) {
		watchdog_thresh = old_thresh;
		watchdog_user_enabled = old_enabled;
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
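
/*
 * Example runtime tuning through the handler above (values illustrative):
 *   echo 20 > /proc/sys/kernel/watchdog_thresh   # 20 s hard / 40 s soft
 *   echo 0  > /proc/sys/kernel/nmi_watchdog      # stop the watchdog threads
 */
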
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

	if (watchdog_user_enabled)
		watchdog_enable_all_cpus(false);
}