/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>
#include <asm/intel_arch_perfmon.h>

int unknown_nmi_panic;
int nmi_watchdog_enabled;
int panic_on_unrecovered_nmi;
/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection registers
 * - different performance counters/event selection registers may be
 *   reserved for different subsystems; this reservation system just tries
 *   to coordinate things a little
 */
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);

static cpumask_t backtrace_mask = CPU_MASK_NONE;

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;

struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;	/* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;	/* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
/* local prototypes */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_PERFCTR0);
		else
			return (msr - MSR_P4_BPU_PERFCTR0);
	}
	return 0;
}

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
		else
			return (msr - MSR_P4_BSU_ESCR0);
	}
	return 0;
}
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
		return 1;
	return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}
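
/*
 * Typical reservation pattern (a sketch of what setup_k7_watchdog() and the
 * other setup_* routines below actually do): claim both MSRs before touching
 * them, and unwind in reverse order on failure.
 *
 *	if (!reserve_perfctr_nmi(perfctr_msr))
 *		goto fail;
 *	if (!reserve_evntsel_nmi(evntsel_msr))
 *		goto fail1;
 *	... program the MSRs ...
 *	return 1;
 * fail1:
 *	release_perfctr_nmi(perfctr_msr);
 * fail:
 *	return 0;
 */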
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return 1;
		else
			return (boot_cpu_data.x86 == 15);
	}
	return 0;
}

/* Run after command line and cpu_init init, but before all other checks */
void nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}
static int endflag __initdata = 0;

#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat fewer cycles. */
	while (endflag == 0)
		mb();
}
#endif
int __init check_nmi_watchdog(void)
{
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	local_irq_enable();
	mdelay((10*1000)/nmi_hz); // wait 10 ticks

	for_each_online_cpu(cpu) {
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(counts);
		atomic_set(&nmi_active, -1);
		endflag = 1;
		return -1;
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC) {
		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

		nmi_hz = 1;
		/*
		 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
		 * are writable, with higher bits sign extending from bit 31.
		 * So we can only program the counter with 31 bit values, and
		 * bit 31 must be set so that bits 32..63 sign extend to 1.
		 * Find the appropriate nmi_hz.
		 */
		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0 &&
		    ((u64)cpu_khz * 1000) > 0x7fffffffULL) {
			nmi_hz = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
		}
	}
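
	/*
	 * Worked example for the ARCH_PERFMON clamp above: on a 3 GHz CPU
	 * cpu_khz is roughly 3,000,000, so (u64)cpu_khz * 1000 ~= 3e9,
	 * which exceeds 0x7fffffff (~2.147e9).  The integer division then
	 * yields nmi_hz = 3e9 / 0x7fffffff + 1 = 2, i.e. two NMIs per
	 * second instead of one, keeping the counter preload within 31 bits.
	 */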
	kfree(counts);
	return 0;
}
int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
		return 0;	/* no lapic support */
	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
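
/*
 * Boot-time usage sketch: "nmi_watchdog=2" selects the local APIC watchdog,
 * and "nmi_watchdog=panic,2" additionally panics when a lockup is detected.
 * This assumes NMI_LOCAL_APIC is defined as 2 in <asm/nmi.h>; the parser
 * above only checks the symbolic range NMI_NONE..NMI_INVALID.
 */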
static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}

void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	disable_irq(0);
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
		enable_irq(0);
	}
}
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */
/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	/* Simulator may not support it */
	if (checking_wrmsrl(evntsel_msr, 0UL))
		goto fail2;
	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;	/* unused */
	wd->check_bit = 1ULL << 63;
	return 1;
fail2:
	release_evntsel_nmi(evntsel_msr);
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
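
/*
 * The counter is preloaded with -((u64)cpu_khz * 1000 / nmi_hz), i.e. the
 * number of core cycles in one watchdog period written as a negative value,
 * so it overflows (and raises the NMI programmed into APIC_LVTPC above)
 * roughly 1/nmi_hz seconds later.  For example, with cpu_khz ~= 2,000,000
 * (a 2 GHz part) and nmi_hz == 1, the preload is about -2e9 cycles, one
 * second of counting.
 */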
static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}
/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)

/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		| P4_CCCR_COMPLEMENT
		| P4_CCCR_COMPARE
		| P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);
	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL << 39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
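
/*
 * Why the CCCR settings above make IQ_COUNTER0 "behave like a clock"
 * (per the comment before setup_p4_watchdog): COMPARE filters the ESCR
 * event count through THRESHOLD(15), and COMPLEMENT inverts that
 * comparison, so the filter condition holds on essentially every cycle
 * and the counter simply counts time.  The overflow NMI then fires once
 * the negative preload written to perfctr_msr elapses, just as in the
 * K7 and ARCH_PERFMON paths.
 */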
static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}
#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

static int setup_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		goto fail;

	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;	/* unused */
	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
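
/*
 * check_bit is the counter's top implemented bit (bit 63 on K7, bit 39 on
 * the P4 IQ counter, bit_width - 1 as reported by CPUID here).  Because the
 * counter is preloaded with a negative value, this bit stays set until the
 * counter wraps past zero and the overflow NMI fires; nmi_watchdog_tick()
 * reads it back to tell a watchdog NMI (bit clear) from an unrelated NMI
 * (bit still set).
 */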
static void stop_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return;

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}
void setup_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 1)
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active neither should the other cpus */
	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				if (!setup_intel_arch_watchdog())
					return;
				break;
			}
			if (!setup_p4_watchdog())
				return;
			break;
		default:
			return;
		}
	}
	wd->enabled = 1;
	atomic_inc(&nmi_active);
}

void stop_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 0)
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				stop_intel_arch_watchdog();
				break;
			}
			stop_p4_watchdog();
			break;
		default:
			return;
		}
	}
	wd->enabled = 0;
	atomic_dec(&nmi_active);
}
/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog(void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu(cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}
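
/*
 * Callers typically invoke touch_nmi_watchdog() from code that legitimately
 * keeps a CPU busy with timer interrupts effectively unserviced for a long
 * time (e.g. a slow console dump loop), so the stall is not reported as a
 * lockup.  A hypothetical caller-side sketch (helper names invented for
 * illustration only):
 *
 *	while (more_work_in_long_polling_loop()) {
 *		do_chunk_of_work();
 *		touch_nmi_watchdog();	// reset every CPU's alert counter
 *	}
 */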
int __kprobes nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk("NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs,
				panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
				/*
				 * ArchPerfmon/Core Duo needs to re-unmask
				 * the apic vector
				 */
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			}
			/* start the cycle over again */
			wrmsrl(wd->perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this.
			 * just assume it was a watchdog timer interrupt
			 * This matches the old behaviour.
			 */
			rc = 1;
		} else
			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
	}
done:
	return rc;
}
asmlinkage __kprobes void do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count, 1);
	default_do_nmi(regs);
	nmi_exit();
}

int do_nmi_callback(struct pt_regs *regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}
#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1);	/* Always panic here */
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0) {
		printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}

#endif
void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}
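
/*
 * How the backtrace request is delivered: this function only sets every
 * online CPU's bit in backtrace_mask and then polls.  Each CPU notices its
 * bit from inside nmi_watchdog_tick() on its next watchdog NMI, prints its
 * stack under the serialising spinlock there, and clears its own bit; the
 * loop above simply waits (up to 10 seconds) for the mask to drain.
 */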
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);