/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson : Power Management for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson : PM converted to driver model. Disable/enable API.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>
#include <asm/intel_arch_perfmon.h>
/*
 * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
 * - it may be reserved by some other driver, or not
 * - when not reserved by some other driver, it may be used for
 *   the NMI watchdog, or not
 *
 * This is maintained separately from nmi_active because the NMI
 * watchdog may also be driven from the I/O APIC timer.
 */
static DEFINE_SPINLOCK(lapic_nmi_owner_lock);
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG (1<<0)
#define LAPIC_NMI_RESERVED (1<<1)
/* nmi_active:
 * +1: the lapic NMI watchdog is active, but can be disabled
 *  0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 * -1: the lapic NMI watchdog is disabled, but can be enabled
 */
int nmi_active;		/* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;
static unsigned int nmi_perfctr_msr;	/* the MSR to reset in NMI handler */
static unsigned int nmi_p4_cccr_val;
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

#define MSR_P4_MISC_ENABLE	0x1A0
#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL	(1<<12)
#define MSR_P4_PERFCTR0		0x300
#define MSR_P4_CCCR0		0x360
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
#define MSR_P4_IQ_COUNTER0	0x30C
#define P4_NMI_CRU_ESCR0	(P4_ESCR_EVENT_SELECT(0x3F)|P4_ESCR_OS|P4_ESCR_USR)
#define P4_NMI_IQ_CCCR0 \
	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return 1;
		else
			return (boot_cpu_data.x86 == 15);
	}
	return 0;
}
/* Run after command line and cpu_init init, but before all other checks */
void __cpuinit nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}
#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	local_irq_enable();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (*endflag == 0)
		barrier();
}
#endif
int __init check_nmi_watchdog (void)
{
	volatile int endflag = 0;
	int *counts;
	int cpu;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	local_irq_enable();
	mdelay((10*1000)/nmi_hz);	/* wait 10 ticks */
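
	/* Each CPU should have taken roughly 10 watchdog NMIs during
	   the delay above; fewer than 6 means the NMI source is not
	   ticking on that CPU. */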
	for_each_online_cpu(cpu) {
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			endflag = 1;
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
				cpu,
				counts[cpu],
				cpu_pda(cpu)->__nmi_count);
			nmi_active = 0;
			lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
			nmi_perfctr_msr = 0;
			kfree(counts);
			return -1;
		}
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(counts);
	return 0;
}
int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str,"panic",5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);
	if (nmi >= NMI_INVALID)
		return 0;
	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
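
/* Example: booting with "nmi_watchdog=panic,2" sets panic_on_timeout and
   selects mode 2 (NMI_LOCAL_APIC, per asm/nmi.h), so die_nmi() panics
   rather than oopses on a lockup; "nmi_watchdog=0" disables the watchdog. */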
static void disable_intel_arch_watchdog(void);

static void disable_lapic_nmi_watchdog(void)
{
	if (nmi_active <= 0)
		return;
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
		break;
	case X86_VENDOR_INTEL:
		if (boot_cpu_data.x86 == 15) {
			wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
			wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
		} else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
			disable_intel_arch_watchdog();
		}
		break;
	}
	nmi_active = -1;
	/* tell do_nmi() and others that we're not active any more */
	nmi_watchdog = 0;
}
static void enable_lapic_nmi_watchdog(void)
{
	if (nmi_active < 0) {
		nmi_watchdog = NMI_LOCAL_APIC;
		touch_nmi_watchdog();
		setup_apic_nmi_watchdog();
	}
}

int reserve_lapic_nmi(void)
{
	unsigned int old_owner;

	spin_lock(&lapic_nmi_owner_lock);
	old_owner = lapic_nmi_owner;
	lapic_nmi_owner |= LAPIC_NMI_RESERVED;
	spin_unlock(&lapic_nmi_owner_lock);
	if (old_owner & LAPIC_NMI_RESERVED)
		return -EBUSY;
	if (old_owner & LAPIC_NMI_WATCHDOG)
		disable_lapic_nmi_watchdog();
	return 0;
}

void release_lapic_nmi(void)
{
	unsigned int new_owner;

	spin_lock(&lapic_nmi_owner_lock);
	new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
	lapic_nmi_owner = new_owner;
	spin_unlock(&lapic_nmi_owner_lock);
	if (new_owner & LAPIC_NMI_WATCHDOG)
		enable_lapic_nmi_watchdog();
}
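
/*
 * Sketch of the intended reserve/release protocol for a perfctr client
 * such as oprofile (my_profiler_nmi is a hypothetical handler name, for
 * illustration only):
 *
 *	if (reserve_lapic_nmi() == 0) {
 *		set_nmi_callback(my_profiler_nmi);
 *		...use the counters...
 *		unset_nmi_callback();
 *		release_lapic_nmi();	- watchdog resumes if it owned the LAPIC
 *	}
 */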
void disable_timer_nmi_watchdog(void)
{
	if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
		return;

	disable_irq(0);
	unset_nmi_callback();
	nmi_active = -1;
	nmi_watchdog = NMI_NONE;
}

void enable_timer_nmi_watchdog(void)
{
	if (nmi_active < 0) {
		nmi_watchdog = NMI_IO_APIC;
		touch_nmi_watchdog();
		nmi_active = 1;
		enable_irq(0);
	}
}
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	nmi_pm_active = nmi_active;
	disable_lapic_nmi_watchdog();
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	if (nmi_pm_active > 0)
		enable_lapic_nmi_watchdog();
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */
/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */
static void clear_msr_range(unsigned int base, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		wrmsr(base+i, 0, 0);
}
static void setup_k7_watchdog(void)
{
	int i;
	unsigned int evntsel;

	nmi_perfctr_msr = MSR_K7_PERFCTR0;

	for (i = 0; i < 4; ++i) {
		/* Simulator may not support it */
		if (checking_wrmsrl(MSR_K7_EVNTSEL0+i, 0UL)) {
			nmi_perfctr_msr = 0;
			return;
		}
		wrmsrl(MSR_K7_PERFCTR0+i, 0UL);
	}

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
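	/* Preload the counter with -(cycles per watchdog period) so it
	   overflows, raising the NMI, after ~1/nmi_hz seconds of unhalted
	   cycles: e.g. a 2 GHz CPU (cpu_khz == 2000000) with nmi_hz ==
	   1000 gets a preload of -2000000, i.e. 1ms worth of cycles. */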
	wrmsrl(MSR_K7_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}
static void disable_intel_arch_watchdog(void)
{
	unsigned ebx;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	ebx = cpuid_ebx(10);
	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
}
static int setup_intel_arch_watchdog(void)
{
	unsigned int evntsel;
	unsigned ebx;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	ebx = cpuid_ebx(10);
	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return 0;

	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;

	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
	wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
	return 1;
}
static int setup_p4_watchdog(void)
{
	unsigned int misc_enable, dummy;

	rdmsr(MSR_P4_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

	nmi_perfctr_msr = MSR_P4_IQ_COUNTER0;
	nmi_p4_cccr_val = P4_NMI_IQ_CCCR0;
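
	/* On a HyperThreaded P4 the two logical CPUs share this one IQ
	   counter, so the overflow must raise a PMI on both of them. */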
#ifdef CONFIG_SMP
	if (smp_num_siblings == 2)
		nmi_p4_cccr_val |= P4_CCCR_OVF_PMI1;
#endif

	if (!(misc_enable & MSR_P4_MISC_ENABLE_PEBS_UNAVAIL))
		clear_msr_range(0x3F1, 2);
	/* MSR 0x3F0 seems to have a default value of 0xFC00, but current
	   docs don't fully define it, so leave it alone for now. */
	if (boot_cpu_data.x86_model >= 0x3) {
		/* MSR_P4_IQ_ESCR0/1 (0x3ba/0x3bb) removed */
		clear_msr_range(0x3A0, 26);
		clear_msr_range(0x3BC, 3);
	} else {
		clear_msr_range(0x3A0, 31);
	}
	clear_msr_range(0x3C0, 6);
	clear_msr_range(0x3C8, 6);
	clear_msr_range(0x3E0, 2);
	clear_msr_range(MSR_P4_CCCR0, 18);
	clear_msr_range(MSR_P4_PERFCTR0, 18);

	wrmsr(MSR_P4_CRU_ESCR0, P4_NMI_CRU_ESCR0, 0);
	wrmsr(MSR_P4_IQ_CCCR0, P4_NMI_IQ_CCCR0 & ~P4_CCCR_ENABLE, 0);
	Dprintk("setting P4_IQ_COUNTER0 to 0x%08lx\n", -(cpu_khz * 1000UL / nmi_hz));
	wrmsrl(MSR_P4_IQ_COUNTER0, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
	return 1;
}
void setup_apic_nmi_watchdog(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 != 15)
			return;
		if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
			return;
		setup_k7_watchdog();
		break;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
			if (!setup_intel_arch_watchdog())
				return;
		} else if (boot_cpu_data.x86 == 15) {
			if (!setup_p4_watchdog())
				return;
		} else {
			return;
		}
		break;
	default:
		return;
	}
	lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
	nmi_active = 1;
}
/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */
static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog (void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu (cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}
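
/* Code that legitimately stalls for a long time with interrupts off
   (e.g. a long mdelay() poll loop or a kernel debugger) should call
   touch_nmi_watchdog() periodically to avoid a false lockup report. */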
void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz) {
			if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
							== NOTIFY_STOP) {
				local_set(&__get_cpu_var(alert_counter), 0);
				return;
			}
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs);
		}
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	if (nmi_perfctr_msr) {
		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
			/*
			 * P4 quirks:
			 * - An overflown perfctr will assert its interrupt
			 *   until the OVF flag in its CCCR is cleared.
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		} else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
			/*
			 * For Intel based architectural perfmon
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
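		/* Rearm: reload the negative period so the next overflow
		   arrives in another ~1/nmi_hz seconds. */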
		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	}
}
static __kprobes int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
	return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	int cpu = safe_smp_processor_id();

	nmi_enter();
	add_pda(__nmi_count,1);
	if (!rcu_dereference(nmi_callback)(regs, cpu))
		default_do_nmi(regs);
	nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
	vmalloc_sync_all();
	rcu_assign_pointer(nmi_callback, callback);
}
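
/* The rcu_assign_pointer()/rcu_dereference() pair orders the callback's
   data against its publication to do_nmi(). vmalloc_sync_all() is here
   presumably so a callback living in module (vmalloc) space is mapped in
   every page table before an NMI can call it, since the NMI path cannot
   take a lazy vmalloc fault. */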
void unset_nmi_callback(void)
{
	nmi_callback = dummy_nmi_callback;
}
#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];
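
	/* Bits 7 and 6 of the NMI status port (0x61) are the memory
	   parity and I/O channel check errors; an NMI with neither bit
	   set has no identifiable source. */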
	if (!(reason & 0xc0)) {
		sprintf(buf, "NMI received for unknown reason %02x\n", reason);
		die_nmi(buf,regs);
	}
	return 0;
}
/*
 * proc handler for /proc/sys/kernel/unknown_nmi_panic
 */
int proc_unknown_nmi_panic(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	old_state = unknown_nmi_panic;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!unknown_nmi_panic)
		return 0;
	if (unknown_nmi_panic) {
		if (reserve_lapic_nmi() < 0) {
			unknown_nmi_panic = 0;
			return -EBUSY;
		} else {
			set_nmi_callback(unknown_nmi_panic_callback);
		}
	} else {
		release_lapic_nmi();
		unset_nmi_callback();
	}
	return 0;
}
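
/* Typical usage: "echo 1 > /proc/sys/kernel/unknown_nmi_panic" reserves
   the LAPIC NMI (disabling the watchdog if it owns it) and makes any
   unexplained NMI fatal via die_nmi(); writing 0 reverses this. */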
#endif
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);