mce.c

/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/edac_mce.h>

#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/ipi.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_read_mutex);

#define rcu_dereference_check_mce(p) \
	rcu_dereference_check((p), \
			      rcu_read_lock_sched_held() || \
			      lockdep_is_held(&mce_read_mutex))

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

int mce_disabled __read_mostly;

#define MISC_MCELOG_MINOR	227

#define SPINUNIT 100	/* 100ns */

atomic_t mce_entry;

DEFINE_PER_CPU(unsigned, mce_exception_count);

/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant __read_mostly = 1;
static int banks __read_mostly;
static int rip_msr __read_mostly;
static int mce_bootlog __read_mostly = -1;
static int monarch_timeout __read_mostly = -1;
static int mce_panic_timeout __read_mostly;
static int mce_dont_log_ce __read_mostly;
int mce_cmci_disabled __read_mostly;
int mce_ignore_ce __read_mostly;
int mce_ser __read_mostly;

struct mce_bank *mce_banks __read_mostly;

/* User mode helper program triggered by machine check event */
static unsigned long mce_need_notify;
static char mce_helper[128];
static char *mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);

static int default_decode_mce(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	pr_emerg("No human readable MCE decoding support on this CPU type.\n");
	pr_emerg("Run the message through 'mcelog --ascii' to decode.\n");

	return NOTIFY_STOP;
}

static struct notifier_block mce_dec_nb = {
	.notifier_call = default_decode_mce,
	.priority      = -1,
};
/* MCA banks polled by the periodic timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static DEFINE_PER_CPU(struct work_struct, mce_work);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	rdtscll(m->tsc);
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
#ifdef CONFIG_SMP
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
#endif
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */
static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;

	/* Emit the trace record: */
	trace_mce_record(mce);

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {
			/*
			 * If edac_mce is enabled, it will check the error type
			 * and will process it if it is a known error.
			 * Otherwise, the error will be sent through the
			 * mcelog interface.
			 */
			if (edac_mce_parse(mce))
				return;

			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}
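
/*
 * Walkthrough of the lockless protocol in mce_log() above: a writer
 * snapshots mcelog.next, skips over slots already marked finished,
 * then claims a slot with cmpxchg(). If two CPUs race for slot N,
 * both attempt cmpxchg(&mcelog.next, N, N + 1) and exactly one wins;
 * the loser rereads mcelog.next and retries on the next slot. The
 * wmb() before setting ->finished pairs with the smp_rmb() in
 * mce_read() so a reader never observes finished == 1 before the
 * record payload is visible.
 */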
static void print_mce(struct mce *m)
{
	pr_emerg("CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
		 m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg("RIP%s %02x:<%016Lx> ",
			 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			 m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg("TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);
	pr_cont("\n");

	pr_emerg("PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
		 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid);

	/*
	 * Print out human-readable details about the MCE error
	 * (if the CPU has an implementation for that):
	 */
	atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
}

static void print_mce_head(void)
{
	pr_emerg("\nHARDWARE ERROR\n");
}

static void print_mce_tail(void)
{
	pr_emerg("This is not a software problem!\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_paniced;

static int fake_panic;
static atomic_t mce_fake_paniced;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mce_panic_timeout;
	panic("Panicking machine check CPU died");
}
static void mce_panic(char *msg, struct mce *final, char *exp)
{
	int i;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_paniced) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_paniced) > 1)
			return;
	}
	print_mce_head();
	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			print_mce(m);
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce)))
			print_mce(m);
	}
	if (final)
		print_mce(final);
	if (cpu_missing)
		printk(KERN_EMERG "Some CPUs didn't answer in synchronization\n");
	print_mce_tail();
	if (exp)
		printk(KERN_EMERG "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mce_panic_timeout;
		panic(msg);
	} else
		printk(KERN_EMERG "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __get_cpu_var(injectm.bank);

	if (msr == rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MCx_STATUS(bank))
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MCx_ADDR(bank))
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MCx_MISC(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__get_cpu_var(injectm).finished) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__get_cpu_var(injectm).finished) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}
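
/*
 * Note on the wrappers above: while error injection is in progress
 * (the per-CPU injectm.finished is set), no real MSR is touched.
 * msr_to_offset() maps e.g. MSR_IA32_MCx_STATUS(bank) to
 * offsetof(struct mce, status), so reads and writes are redirected
 * into the per-CPU injectm record instead of the hardware registers.
 */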
/*
 * Simple lockless ring to communicate PFNs from the exception handler with the
 * process context work function. This is vastly simplified because there's
 * only a single reader and a single writer.
 */
#define MCE_RING_SIZE 16	/* we use one entry less */

struct mce_ring {
	unsigned short start;
	unsigned short end;
	unsigned long ring[MCE_RING_SIZE];
};
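
/*
 * The "one entry less" above is what makes empty and full states
 * distinguishable without a separate counter: the ring is empty when
 * start == end and full when (end + 1) % MCE_RING_SIZE == start.
 * With MCE_RING_SIZE of 16, up to 15 PFNs can be queued between runs
 * of the work function.
 */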
static DEFINE_PER_CPU(struct mce_ring, mce_ring);

/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);

	return r->start == r->end;
}

static int mce_ring_get(unsigned long *pfn)
{
	struct mce_ring *r;
	int ret = 0;

	*pfn = 0;
	get_cpu();
	r = &__get_cpu_var(mce_ring);
	if (r->start == r->end)
		goto out;
	*pfn = r->ring[r->start];
	r->start = (r->start + 1) % MCE_RING_SIZE;
	ret = 1;
out:
	put_cpu();
	return ret;
}

/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);
	unsigned next;

	next = (r->end + 1) % MCE_RING_SIZE;
	if (next == r->start)
		return -1;
	r->ring[r->end] = pfn;
	wmb();
	r->end = next;
	return 0;
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_ring_empty()) {
		struct work_struct *work = &__get_cpu_var(mce_work);

		if (!work_pending(work))
			schedule_work(work);
	}
}

/*
 * Get the address of the instruction at the time of the machine check
 * error.
 */
static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
{
	if (regs && (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV))) {
		m->ip = regs->ip;
		m->cs = regs->cs;
	} else {
		m->ip = 0;
		m->cs = 0;
	}
	if (rip_msr)
		m->ip = mce_rdmsrl(rip_msr);
}
#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Called after interrupts have been reenabled again
 * when an MCE happened during an interrupts off region
 * in the kernel.
 */
asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	exit_idle();
	irq_enter();
	mce_notify_irq();
	mce_schedule_work();
	irq_exit();
}
#endif

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Without an APIC do not notify. The event will be picked
	 * up eventually.
	 */
	if (!cpu_has_apic)
		return;

	/*
	 * When interrupts are disabled we cannot use
	 * kernel services safely. Trigger a self interrupt
	 * through the APIC to do the notification
	 * after interrupts are reenabled again.
	 */
	apic->send_IPI_self(MCE_SELF_VECTOR);

	/*
	 * Wait for idle afterwards again so that we don't leave the
	 * APIC in a non idle state because the normal APIC writes
	 * cannot exclude us.
	 */
	apic_wait_icr_idle();
#endif
}
DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between the exception handler
 * and the poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In that case it's likely the CPU
 * will not fully execute the machine check handler either.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce m;
	int i;

	__get_cpu_var(mce_poll_count)++;

	mce_setup(&m);

	m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	for (i = 0; i < banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		if (m.status & MCI_STATUS_MISCV)
			m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
		if (m.status & MCI_STATUS_ADDRV)
			m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) {
			mce_log(&m);
			add_taint(TAINT_MACHINE_CHECK);
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */
	sync_core();
}
EXPORT_SYMBOL_GPL(machine_check_poll);
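
/*
 * Division of labour: machine_check_poll() only consumes events the
 * exception handler would ignore -- corrected errors and, when
 * MCG_SER_P is advertised (mce_ser), anything not explicitly
 * signalled (MCI_STATUS_S). Everything else stays in the bank
 * registers for do_machine_check() below to grade and clear.
 */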
/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg)
{
	int i;

	for (i = 0; i < banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
			return 1;
	}
	return 0;
}
/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until mce_executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_paniced))
		wait_for_panic();
	if (!monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		/* CHECKME: Make panic default for 1 too? */
		if (tolerant < 1)
			mce_panic("Timeout synchronizing machine check over CPUs",
				  NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}
/*
 * The Monarch's reign. The Monarch is the CPU which entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure that all CPUs' errors are examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
					    &nmsg);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
		mce_panic("Fatal Machine check", m, msg);
	/*
	 * For UC somewhere we let the CPU which detects it handle it.
	 * We also must let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */
	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
		mce_panic("Machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * global_nwo should be updated before mce_callin
	 */
	smp_wmb();
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout)) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout)) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}
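
/*
 * Example of the callin protocol above with three online CPUs: each
 * increments mce_callin and receives order 1, 2 or 3, then all spin
 * until mce_callin reaches num_online_cpus(). Order 1 becomes the
 * Monarch and sets mce_executing to 1 so it scans its banks first;
 * the CPU with order 2 proceeds only after mce_end() on the previous
 * CPU has incremented mce_executing, and so on. A shared bank is
 * therefore seen and cleared by exactly one CPU.
 */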
/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}
/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;
	if ((m->misc & 0x3f) > PAGE_SHIFT)
		return 0;
	if (((m->misc >> 6) & 7) != MCM_ADDR_PHYS)
		return 0;
	return 1;
}
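
/*
 * The checks above follow the MCi_MISC layout: bits 5:0 hold the
 * least significant valid bit of the recoverable address (a value
 * above PAGE_SHIFT would mean coarser than a page), and bits 8:6
 * hold the address mode, which must be MCM_ADDR_PHYS for something
 * we can hand to memory_failure() as a PFN.
 */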
static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so always be careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	char *msg = "Unknown";

	atomic_inc(&mce_entry);

	__get_cpu_var(mce_exception_count)++;

	if (notify_die(DIE_NMI, "machine check", regs, error_code,
		       18, SIGKILL) == NOTIFY_STOP)
		goto out;
	if (!banks)
		goto out;

	mce_setup(&m);

	m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	final = &__get_cpu_var(mces_seen);
	*final = m;

	no_way_out = mce_no_way_out(&m, &msg);

	barrier();

	/*
	 * When there is no way to restart at the instruction pointer,
	 * we must always kill or panic.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Go through all the banks in exclusion of the other CPUs.
	 * This way we don't report duplicated events on shared banks
	 * because the first one to see it will clear it.
	 */
	order = mce_start(&no_way_out);
	for (i = 0; i < banks; i++) {
		__clear_bit(i, toclear);
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Non uncorrected or non signaled errors are handled by
		 * machine_check_poll. Leave them alone, unless this panics.
		 */
		if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
		    !no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK);

		severity = mce_severity(&m, tolerant, NULL);
		/*
		 * When the machine check was for a corrected error, leave
		 * it to the poll handler and don't touch it here, unless
		 * we're panicking.
		 */
		if (severity == MCE_KEEP_SEVERITY && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		/*
		 * Kill on action required.
		 */
		if (severity == MCE_AR_SEVERITY)
			kill_it = 1;

		if (m.status & MCI_STATUS_MISCV)
			m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
		if (m.status & MCI_STATUS_ADDRV)
			m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		/*
		 * Action optional error. Queue address for later processing.
		 * When the ring overflows we just ignore the AO error.
		 * RED-PEN add some logging mechanism when
		 * usable_address or mce_add_ring fails.
		 * RED-PEN don't ignore overflow for tolerant == 0
		 */
		if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
			mce_ring_add(m.addr >> PAGE_SHIFT);

		mce_get_rip(&m, regs);
		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (mce_end(order) < 0)
		no_way_out = worst >= MCE_PANIC_SEVERITY;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 *
	 * This is mainly used in the case when the system doesn't
	 * support MCE broadcasting or it has been disabled.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Fatal machine check on current CPU", final, msg);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done. Try to kill as little as possible. If we can kill just
	 * one task, do that. If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3)
		force_sig(SIGBUS, current);

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	atomic_dec(&mce_entry);
	sync_core();
}
EXPORT_SYMBOL_GPL(do_machine_check);
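
/*
 * Shape of the handler above: snapshot MCG_STATUS, rendezvous with
 * the other CPUs via mce_start()/mce_end(), grade every valid bank
 * with mce_severity(), queue usable AO addresses for the work
 * function, and afterwards either panic (no_way_out), SIGBUS the
 * current task (kill_it), or merely log, depending on tolerant.
 */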
/* dummy to break dependency. actual code is in mm/memory-failure.c */
void __attribute__((weak)) memory_failure(unsigned long pfn, int vector)
{
	printk(KERN_ERR "Action optional memory failure at %lx ignored\n", pfn);
}

/*
 * Called after mce notification in process context. This code
 * is allowed to sleep. Call the high level VM handler to process
 * any corrupted pages.
 * Assume that the work queue code only calls this one at a time
 * per CPU.
 * Note we don't disable preemption, so this code might run on the wrong
 * CPU. In this case the event is picked up by the scheduled work queue.
 * This is merely a fast path to expedite processing in some common
 * cases.
 */
void mce_notify_process(void)
{
	unsigned long pfn;

	mce_notify_irq();
	while (mce_ring_get(&pfn))
		memory_failure(pfn, MCE_VECTOR);
}

static void mce_process_work(struct work_struct *dummy)
{
	mce_notify_process();
}

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static int check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mce_start_timer(unsigned long data)
{
	struct timer_list *t = &per_cpu(mce_timer, data);
	int *n;

	WARN_ON(smp_processor_id() != data);

	if (mce_available(&current_cpu_data)) {
		machine_check_poll(MCP_TIMESTAMP,
				   &__get_cpu_var(mce_poll_banks));
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	n = &__get_cpu_var(mce_next_interval);
	if (mce_notify_irq())
		*n = max(*n/2, HZ/100);
	else
		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
	t->expires = jiffies + *n;
	add_timer_on(t, smp_processor_id());
}
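
/*
 * Interval adaptation example, assuming HZ == 1000 and the default
 * check_interval of 300 seconds: after an event is logged the next
 * poll runs in half the previous interval (never below HZ/100, i.e.
 * 10ms); every quiet poll doubles it again, saturating around
 * check_interval * HZ == 300000 jiffies.
 */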
static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	clear_thread_flag(TIF_MCE_NOTIFY);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		wake_up_interruptible(&mce_wait);

		/*
		 * There is no risk of missing notifications because
		 * work_pending is always cleared before the function is
		 * executed.
		 */
		if (mce_helper[0] && !work_pending(&mce_trigger_work))
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			printk(KERN_INFO "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __cpuinit __mcheck_cpu_mce_banks_init(void)
{
	int i;

	mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __cpuinit __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!banks)
		printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		printk(KERN_WARNING
		       "MCE: Using only %u machine check banks out of %u\n",
		       MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(banks != 0 && b != banks);
	banks = b;
	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mce_ser = 1;

	return 0;
}
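
/*
 * MCG_CAP decoding used above: the low bits (MCG_BANKCNT_MASK) give
 * the bank count, MCG_EXT_P together with an extended register count
 * of at least 9 means MSR_IA32_MCG_EIP holds an accurate error RIP,
 * and MCG_SER_P advertises software error recovery, which switches
 * the handlers to filtering on MCI_STATUS_S instead of MCI_STATUS_UC.
 */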
static void __mcheck_cpu_init_generic(void)
{
	mce_banks_t all_banks;
	u64 cap;
	int i;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);

	set_in_cr4(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 <= 17 && mce_bootlog < 0) {
			/*
			 * Lots of broken BIOS around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			mce_bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && banks > 0)
			mce_banks[0].ctl = 0;
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */
		if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
		    monarch_timeout < 0)
			monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
			mce_bootlog = 0;
	}
	if (monarch_timeout < 0)
		monarch_timeout = 0;
	if (mce_bootlog != 0)
		mce_panic_timeout = 30;

	return 0;
}

static void __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return;
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		break;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		break;
	}
}

static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	int *n = &__get_cpu_var(mce_next_interval);

	setup_timer(t, mce_start_timer, smp_processor_id());

	if (mce_ignore_ce)
		return;

	*n = check_interval * HZ;
	if (!*n)
		return;
	t->expires = round_jiffies(jiffies + *n);
	add_timer_on(t, smp_processor_id());
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return;

	__mcheck_cpu_ancient_init(c);

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mce_disabled = 1;
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_timer();
	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
}

/*
 * Character device to read and clear the MCE log.
 */
static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;		/* #times opened */
static int open_exclu;		/* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		open_exclu = 1;
	open_count++;

	spin_unlock(&mce_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	open_count--;
	open_exclu = 0;

	spin_unlock(&mce_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
			loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_read_mutex);
	next = rcu_dereference_check_mce(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
		mutex_unlock(&mce_read_mutex);
		kfree(cpu_tsc);

		return -EINVAL;
	}

	err = 0;
	prev = 0;
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;

			while (!mcelog.entry[i].finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(mcelog.entry + i, 0,
					       sizeof(struct mce));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, mcelog.entry + i,
					    sizeof(struct mce));
			buf += sizeof(struct mce);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i,
					    sizeof(struct mce));
			smp_rmb();
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}
	mutex_unlock(&mce_read_mutex);
	kfree(cpu_tsc);

	return err ? -EFAULT : buf - ubuf;
}
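
/*
 * mce_read() above drains the log in two passes: first it copies
 * everything up to the snapshotted mcelog.next and resets next with
 * cmpxchg(); then, after synchronize_sched() and a TSC sample on
 * every CPU, it sweeps once more for records that were still being
 * written when the buffer was reset.
 */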
static unsigned int mce_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_wait, wait);
	if (rcu_dereference_check_mce(mcelog.next))
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

/* Modified in mce-inject.c, so not static or const */
struct file_operations mce_chrdev_ops = {
	.open			= mce_open,
	.release		= mce_release,
	.read			= mce_read,
	.poll			= mce_poll,
	.unlocked_ioctl		= mce_ioctl,
};
EXPORT_SYMBOL_GPL(mce_chrdev_ops);

static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

/*
 * mce=off		Disables machine check
 * mce=no_cmci		Disables CMCI
 * mce=dont_log_ce	Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce	Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog		Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog	Don't log MCEs from before booting.
 */
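
/*
 * Example of the numeric form above: booting with "mce=2,500000"
 * sets tolerant to 2 and monarch_timeout to 500000 us, i.e. wait up
 * to half a second for the other CPUs during a broadcast machine
 * check.
 */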
  1402. static int __init mcheck_enable(char *str)
  1403. {
  1404. if (*str == 0) {
  1405. enable_p5_mce();
  1406. return 1;
  1407. }
  1408. if (*str == '=')
  1409. str++;
  1410. if (!strcmp(str, "off"))
  1411. mce_disabled = 1;
  1412. else if (!strcmp(str, "no_cmci"))
  1413. mce_cmci_disabled = 1;
  1414. else if (!strcmp(str, "dont_log_ce"))
  1415. mce_dont_log_ce = 1;
  1416. else if (!strcmp(str, "ignore_ce"))
  1417. mce_ignore_ce = 1;
  1418. else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
  1419. mce_bootlog = (str[0] == 'b');
  1420. else if (isdigit(str[0])) {
  1421. get_option(&str, &tolerant);
  1422. if (*str == ',') {
  1423. ++str;
  1424. get_option(&str, &monarch_timeout);
  1425. }
  1426. } else {
  1427. printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
  1428. str);
  1429. return 0;
  1430. }
  1431. return 1;
  1432. }
  1433. __setup("mce", mcheck_enable);
  1434. int __init mcheck_init(void)
  1435. {
  1436. atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb);
  1437. mcheck_intel_therm_init();
  1438. return 0;
  1439. }
  1440. /*
  1441. * Sysfs support
  1442. */
  1443. /*
  1444. * Disable machine checks on suspend and shutdown. We can't really handle
  1445. * them later.
  1446. */
  1447. static int mce_disable_error_reporting(void)
  1448. {
  1449. int i;
  1450. for (i = 0; i < banks; i++) {
  1451. struct mce_bank *b = &mce_banks[i];
  1452. if (b->init)
  1453. wrmsrl(MSR_IA32_MCx_CTL(i), 0);
  1454. }
  1455. return 0;
  1456. }
  1457. static int mce_suspend(struct sys_device *dev, pm_message_t state)
  1458. {
  1459. return mce_disable_error_reporting();
  1460. }
  1461. static int mce_shutdown(struct sys_device *dev)
  1462. {
  1463. return mce_disable_error_reporting();
  1464. }
  1465. /*
  1466. * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
  1467. * Only one CPU is active at this time, the others get re-added later using
  1468. * CPU hotplug:
  1469. */
  1470. static int mce_resume(struct sys_device *dev)
  1471. {
  1472. __mcheck_cpu_init_generic();
  1473. __mcheck_cpu_init_vendor(&current_cpu_data);
  1474. return 0;
  1475. }

static void mce_cpu_restart(void *data)
{
	del_timer_sync(&__get_cpu_var(mce_timer));
	if (!mce_available(&current_cpu_data))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_ce(void *all)
{
	if (!mce_available(&current_cpu_data))
		return;
	if (all)
		del_timer_sync(&__get_cpu_var(mce_timer));
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(&current_cpu_data))
		return;
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}
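
/*
 * Added note: the "all" argument above selects how much to toggle:
 * the ignore_ce writer below passes (void *)1 so the polling timer is
 * stopped or restarted along with CMCI, while the cmci_disabled
 * writer passes NULL so only CMCI is affected.
 */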

static struct sysdev_class mce_sysclass = {
	.suspend	= mce_suspend,
	.shutdown	= mce_shutdown,
	.resume		= mce_resume,
	.name		= "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, mce_dev);

__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

static inline struct mce_bank *attr_to_bank(struct sysdev_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}

static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	attr_to_bank(attr)->ctl = new;
	mce_restart();

	return size;
}

static ssize_t
show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
{
	strcpy(buf, mce_helper);
	strcat(buf, "\n");
	return strlen(mce_helper) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
			   const char *buf, size_t siz)
{
	char *p;

	strncpy(mce_helper, buf, sizeof(mce_helper));
	mce_helper[sizeof(mce_helper)-1] = 0;
	p = strchr(mce_helper, '\n');

	if (p)
		*p = 0;

	return strlen(mce_helper) + !!p;
}

static ssize_t set_ignore_ce(struct sys_device *s,
			     struct sysdev_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	if (mce_ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			on_each_cpu(mce_disable_ce, (void *)1, 1);
			mce_ignore_ce = 1;
		} else {
			/* enable ce features */
			mce_ignore_ce = 0;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	return size;
}

static ssize_t set_cmci_disabled(struct sys_device *s,
				 struct sysdev_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	if (mce_cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_ce, NULL, 1);
			mce_cmci_disabled = 1;
		} else {
			/* enable cmci */
			mce_cmci_disabled = 0;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	return size;
}

static ssize_t store_int_with_restart(struct sys_device *s,
				      struct sysdev_attribute *attr,
				      const char *buf, size_t size)
{
	ssize_t ret = sysdev_store_int(s, attr, buf, size);

	mce_restart();
	return ret;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
static SYSDEV_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);

static struct sysdev_ext_attribute attr_check_interval = {
	_SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
		     store_int_with_restart),
	&check_interval
};

static struct sysdev_ext_attribute attr_ignore_ce = {
	_SYSDEV_ATTR(ignore_ce, 0644, sysdev_show_int, set_ignore_ce),
	&mce_ignore_ce
};

static struct sysdev_ext_attribute attr_cmci_disabled = {
	_SYSDEV_ATTR(cmci_disabled, 0644, sysdev_show_int, set_cmci_disabled),
	&mce_cmci_disabled
};

static struct sysdev_attribute *mce_attrs[] = {
	&attr_tolerant.attr,
	&attr_check_interval.attr,
	&attr_trigger,
	&attr_monarch_timeout.attr,
	&attr_dont_log_ce.attr,
	&attr_ignore_ce.attr,
	&attr_cmci_disabled.attr,
	NULL
};
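
/*
 * Illustrative shell usage of the attributes above (default sysfs
 * mount assumed; machinecheck0 is the per-CPU sysdev for CPU 0, and
 * /sbin/mce-trigger is a hypothetical helper program):
 *
 *	echo /sbin/mce-trigger > \
 *		/sys/devices/system/machinecheck/machinecheck0/trigger
 *	echo 60 > /sys/devices/system/machinecheck/machinecheck0/check_interval
 *	echo 0 > /sys/devices/system/machinecheck/machinecheck0/bank2
 *
 * The first installs the helper run on machine check events, the
 * second sets the poll interval to 60 seconds (store_int_with_restart()
 * then reinitializes all CPUs), and the third masks every error class
 * in bank 2.
 */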

static cpumask_var_t mce_dev_initialized;

/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	memset(&per_cpu(mce_dev, cpu).kobj, 0, sizeof(struct kobject));
	per_cpu(mce_dev, cpu).id	= cpu;
	per_cpu(mce_dev, cpu).cls	= &mce_sysclass;

	err = sysdev_register(&per_cpu(mce_dev, cpu));
	if (err)
		return err;

	for (i = 0; mce_attrs[i]; i++) {
		err = sysdev_create_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < banks; j++) {
		err = sysdev_create_file(&per_cpu(mce_dev, cpu),
					 &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_dev_initialized);

	return 0;
error2:
	/* Unwind the bank files created so far */
	while (--j >= 0)
		sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[j].attr);
error:
	/* Unwind the common attribute files, then the device itself */
	while (--i >= 0)
		sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);

	sysdev_unregister(&per_cpu(mce_dev, cpu));

	return err;
}

static __cpuinit void mce_remove_device(unsigned int cpu)
{
	int i;

	if (!cpumask_test_cpu(cpu, mce_dev_initialized))
		return;

	for (i = 0; mce_attrs[i]; i++)
		sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);

	for (i = 0; i < banks; i++)
		sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[i].attr);

	sysdev_unregister(&per_cpu(mce_dev, cpu));
	cpumask_clear_cpu(cpu, mce_dev_initialized);
}

/* Make sure there are no machine checks on offlined CPUs. */
static void __cpuinit mce_disable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(&current_cpu_data))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_clear();
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
}

static void __cpuinit mce_reenable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(&current_cpu_data))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_reenable();
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
	}
}
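
/*
 * Added note: both helpers above run via smp_call_function_single()
 * from the hotplug callback below, on the CPU that is going down or
 * coming back, so current_cpu_data and the MSR writes refer to that
 * CPU. CPU_TASKS_FROZEN indicates a suspend/resume transition, where
 * CMCI bank ownership is deliberately left as-is.
 */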

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_create_device(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_remove_device(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		del_timer_sync(t);
		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!mce_ignore_ce && check_interval) {
			t->expires = round_jiffies(jiffies +
					__get_cpu_var(mce_next_interval));
			add_timer_on(t, cpu);
		}
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		break;
	case CPU_POST_DEAD:
		/* intentionally ignoring frozen here */
		cmci_rediscover(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};

static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];
		struct sysdev_attribute *a = &b->attr;

		sysfs_attr_init(&a->attr);
		a->attr.name	= b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;
	}
}

static __init int mcheck_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	zalloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL);

	mce_init_banks();

	err = sysdev_class_register(&mce_sysclass);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_create_device(i);
		if (err)
			return err;
	}

	register_hotcpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);

	return err;
}

device_initcall(mcheck_init_device);

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mce_disabled = 1;
	return 1;
}
__setup("nomce", mcheck_disable);

#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);

	return dmce;
}

static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_paniced, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");
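
/*
 * Illustrative usage (assumes debugfs mounted at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/mce/fake_panic
 *
 * With fake_panic set, mce_panic() logs the would-be panic message and
 * returns instead of bringing the machine down, which helps when
 * testing with injected errors. The file is created 0444 below, but
 * the write handler is wired up, so root (which overrides file modes)
 * can still set it.
 */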

static int __init mcheck_debugfs_init(void)
{
	struct dentry *dmce, *ffake_panic;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		return -ENOMEM;
	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
					  &fake_panic_fops);
	if (!ffake_panic)
		return -ENOMEM;

	return 0;
}
late_initcall(mcheck_debugfs_init);
#endif