/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/edac_mce.h>
#include <linux/irq_work.h>

#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_read_mutex);

#define rcu_dereference_check_mce(p) \
	rcu_dereference_index_check((p), \
			      rcu_read_lock_sched_held() || \
			      lockdep_is_held(&mce_read_mutex))

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

int mce_disabled __read_mostly;

#define MISC_MCELOG_MINOR	227

#define SPINUNIT 100	/* 100ns */

atomic_t mce_entry;

DEFINE_PER_CPU(unsigned, mce_exception_count);

/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int		tolerant		__read_mostly = 1;
static int		banks			__read_mostly;
static int		rip_msr			__read_mostly;
static int		mce_bootlog		__read_mostly = -1;
static int		monarch_timeout		__read_mostly = -1;
static int		mce_panic_timeout	__read_mostly;
static int		mce_dont_log_ce		__read_mostly;
int			mce_cmci_disabled	__read_mostly;
int			mce_ignore_ce		__read_mostly;
int			mce_ser			__read_mostly;

struct mce_bank		*mce_banks		__read_mostly;

/* User mode helper program triggered by machine check event */
static unsigned long	mce_need_notify;
static char		mce_helper[128];
static char		*mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
static DEFINE_PER_CPU(struct mce, mces_seen);
static int		cpu_missing;

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);

/* MCA banks polled by the periodic polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static DEFINE_PER_CPU(struct work_struct, mce_work);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	rdtscll(m->tsc);
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
#ifdef CONFIG_SMP
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
#endif
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */

static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;

	/* Emit the trace record: */
	trace_mce_record(mce);

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {
			/*
			 * If edac_mce is enabled, it will check the error
			 * type and will process it, if it is a known error.
			 * Otherwise, the error will be sent through the
			 * mcelog interface.
			 */
			if (edac_mce_parse(mce))
				return;

			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}
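
/*
 * Illustrative note (editor's addition, not in the original source):
 * the writer protocol above publishes a slot in three steps that the
 * read side relies on:
 *
 *   1. cmpxchg(&mcelog.next, entry, entry + 1) claims slot 'entry';
 *      only the winning CPU advances past the loop.
 *   2. memcpy() fills the slot while 'finished' is still 0, so a
 *      concurrent reader treats the record as in-flight.
 *   3. The wmb(); finished = 1; wmb(); sequence publishes the record;
 *      a reader that observes finished == 1 (after smp_rmb()) may
 *      safely copy the payload.
 */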
static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
	       m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
				m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid);

	/*
	 * Print out human-readable details about the MCE error,
	 * (if the CPU has an implementation for that)
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_paniced;

static int fake_panic;
static atomic_t mce_fake_paniced;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT * USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mce_panic_timeout;
	panic("Panicing machine check CPU died");
}

static void mce_panic(char *msg, struct mce *final, char *exp)
{
	int i, apei_err = 0;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_paniced) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_paniced) > 1)
			return;
	}
	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce))) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mce_panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MCx_STATUS(bank))
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MCx_ADDR(bank))
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MCx_MISC(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}
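
/*
 * Illustrative note (editor's addition, not in the original source):
 * with this redirection an injector such as mce-inject can fill the
 * per-CPU 'injectm' record and then set injectm.finished; subsequent
 * "MSR" reads and writes in the handler hit the fields of that struct
 * mce instead of real hardware registers. A hypothetical sequence:
 *
 *	struct mce *i = &per_cpu(injectm, cpu);
 *	i->bank = 1;
 *	i->status = MCI_STATUS_VAL | MCI_STATUS_UC;	// fake error
 *	i->finished = 1;				// arm redirection
 */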
/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;
		}

		/* Use accurate RIP reporting if available. */
		if (rip_msr)
			m->ip = mce_rdmsrl(rip_msr);
	}
}

/*
 * Simple lockless ring to communicate PFNs from the exception handler to the
 * process context work function. This is vastly simplified because there's
 * only a single reader and a single writer.
 */
#define MCE_RING_SIZE 16	/* we use one entry less */

struct mce_ring {
	unsigned short start;
	unsigned short end;
	unsigned long ring[MCE_RING_SIZE];
};
static DEFINE_PER_CPU(struct mce_ring, mce_ring);

/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);

	return r->start == r->end;
}

static int mce_ring_get(unsigned long *pfn)
{
	struct mce_ring *r;
	int ret = 0;

	*pfn = 0;
	get_cpu();
	r = &__get_cpu_var(mce_ring);
	if (r->start == r->end)
		goto out;
	*pfn = r->ring[r->start];
	r->start = (r->start + 1) % MCE_RING_SIZE;
	ret = 1;
out:
	put_cpu();
	return ret;
}

/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);
	unsigned next;

	next = (r->end + 1) % MCE_RING_SIZE;
	if (next == r->start)
		return -1;
	r->ring[r->end] = pfn;
	wmb();
	r->end = next;
	return 0;
}
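
/*
 * Illustrative note (editor's addition, not in the original source):
 * this is the classic single-producer/single-consumer ring. One slot
 * is sacrificed so that "full" (next == start) and "empty"
 * (start == end) stay distinguishable, i.e. at most
 * MCE_RING_SIZE - 1 = 15 PFNs can be queued per CPU. The wmb() in
 * mce_ring_add() makes the PFN visible before the updated 'end'
 * index, mirroring the publish step in mce_log().
 */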
int mce_available(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_ring_empty()) {
		struct work_struct *work = &__get_cpu_var(mce_work);

		if (!work_pending(work))
			schedule_work(work);
	}
}

DEFINE_PER_CPU(struct irq_work, mce_irq_work);

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&__get_cpu_var(mce_irq_work));
}

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends to panic for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between the exception handler
 * and the poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In that case it's likely it will
 * not fully execute the machine check handler either.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce m;
	int i;

	percpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	for (i = 0; i < banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the
		 * exception handler when it is enabled, so don't process
		 * those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		if (m.status & MCI_STATUS_MISCV)
			m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
		if (m.status & MCI_STATUS_ADDRV)
			m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) {
			mce_log(&m);
			atomic_notifier_call_chain(&x86_mce_decoder_chain,
						   0, &m);
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg)
{
	int i;

	for (i = 0; i < banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
			return 1;
	}
	return 0;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing is equal its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_paniced))
		wait_for_panic();
	if (!monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		/* CHECKME: Make panic default for 1 too? */
		if (tolerant < 1)
			mce_panic("Timeout synchronizing machine check over CPUs",
				  NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}
/*
 * The Monarch's reign. The Monarch is the CPU that entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable
 * case and also make sure that all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a
 * semi-stable state and won't corrupt anything by itself. It's ok to let
 * the others continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
					    &nmsg);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
		mce_panic("Fatal Machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * Also must let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
		mce_panic("Machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * global_nwo should be updated before mce_callin
	 */
	smp_wmb();
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout)) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout)) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}
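
/*
 * Illustrative note (editor's addition, not in the original source):
 * with a broadcast MCE on a 4-CPU system the rendezvous plays out
 * roughly like this:
 *
 *   - atomic_inc_return(&mce_callin) hands out orders 1, 2, 3, 4;
 *     order 1 becomes the Monarch and scans its banks first;
 *   - each Subject spins until mce_executing reaches its own order,
 *     so banks are scanned strictly in callin order and shared banks
 *     are seen and cleared exactly once;
 *   - in mce_end() every CPU increments mce_executing once more; the
 *     Monarch waits for the count to pass num_online_cpus(), runs
 *     mce_reign() to grade all mces_seen, then resets the counters so
 *     the Subjects (waiting for mce_executing == 0) can leave.
 */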
/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;
	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;
	return 1;
}
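
/*
 * Illustrative note (editor's addition, not in the original source):
 * MCi_MISC describes how to interpret MCi_ADDR. The LSB field gives
 * the lowest valid address bit, so LSB <= PAGE_SHIFT means the address
 * is page-granular or finer, and the MODE field must say it is a
 * physical address; only then can the PFN be handed to
 * memory_failure() for recovery.
 */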
static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules.
 * This implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so always be careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	char *msg = "Unknown";

	atomic_inc(&mce_entry);

	percpu_inc(mce_exception_count);

	if (notify_die(DIE_NMI, "machine check", regs, error_code,
			   18, SIGKILL) == NOTIFY_STOP)
		goto out;
	if (!banks)
		goto out;

	mce_gather_info(&m, regs);

	final = &__get_cpu_var(mces_seen);
	*final = m;

	no_way_out = mce_no_way_out(&m, &msg);

	barrier();

	/*
	 * When there is no restart IP, we must always kill or panic.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Go through all the banks in exclusion of the other CPUs.
	 * This way we don't report duplicated events on shared banks
	 * because the first one to see it will clear it.
	 */
	order = mce_start(&no_way_out);
	for (i = 0; i < banks; i++) {
		__clear_bit(i, toclear);
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Errors that are neither uncorrected nor signaled are
		 * handled by machine_check_poll. Leave them alone, unless
		 * this panics.
		 */
		if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK);

		severity = mce_severity(&m, tolerant, NULL);

		/*
		 * When the machine check was for a corrected error, don't
		 * touch it, unless we're panicking.
		 */
		if (severity == MCE_KEEP_SEVERITY && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		/*
		 * Kill on action required.
		 */
		if (severity == MCE_AR_SEVERITY)
			kill_it = 1;

		if (m.status & MCI_STATUS_MISCV)
			m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
		if (m.status & MCI_STATUS_ADDRV)
			m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		/*
		 * Action optional error. Queue address for later processing.
		 * When the ring overflows we just ignore the AO error.
		 * RED-PEN add some logging mechanism when
		 * mce_usable_address or mce_ring_add fails.
		 * RED-PEN don't ignore overflow for tolerant == 0
		 */
		if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
			mce_ring_add(m.addr >> PAGE_SHIFT);

		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (mce_end(order) < 0)
		no_way_out = worst >= MCE_PANIC_SEVERITY;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 *
	 * This is mainly used in the case when the system doesn't
	 * support MCE broadcasting or it has been disabled.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Fatal machine check on current CPU", final, msg);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done. Try to kill as little as possible. If we can kill just
	 * one task, do that. If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */

	if (kill_it && tolerant < 3)
		force_sig(SIGBUS, current);

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	atomic_dec(&mce_entry);
	sync_core();
}
EXPORT_SYMBOL_GPL(do_machine_check);
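
/*
 * Illustrative summary (editor's addition, not in the original source)
 * of the per-bank decision flow above:
 *
 *	VAL not set          -> skip the bank
 *	not UC/S (corrected) -> leave for machine_check_poll(),
 *	                        unless no_way_out forces logging
 *	MCE_KEEP_SEVERITY    -> keep for the poller, don't clear
 *	MCE_NO_SEVERITY      -> clear silently (MCE was not enabled)
 *	MCE_AR_SEVERITY      -> action required: kill_it = 1
 *	MCE_AO_SEVERITY      -> action optional: queue the PFN for
 *	                        memory_failure() in process context
 */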
/* dummy to break dependency. actual code is in mm/memory-failure.c */
void __attribute__((weak)) memory_failure(unsigned long pfn, int vector)
{
	printk(KERN_ERR "Action optional memory failure at %lx ignored\n", pfn);
}

/*
 * Called after mce notification in process context. This code
 * is allowed to sleep. Call the high level VM handler to process
 * any corrupted pages.
 * Assume that the work queue code only calls this one at a time
 * per CPU.
 * Note we don't disable preemption, so this code might run on the wrong
 * CPU. In this case the event is picked up by the scheduled work queue.
 * This is merely a fast path to expedite processing in some common
 * cases.
 */
void mce_notify_process(void)
{
	unsigned long pfn;

	mce_notify_irq();
	while (mce_ring_get(&pfn))
		memory_failure(pfn, MCE_VECTOR);
}

static void mce_process_work(struct work_struct *dummy)
{
	mce_notify_process();
}

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static int check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mce_start_timer(unsigned long data)
{
	struct timer_list *t = &per_cpu(mce_timer, data);
	int *n;

	WARN_ON(smp_processor_id() != data);

	if (mce_available(__this_cpu_ptr(&cpu_info))) {
		machine_check_poll(MCP_TIMESTAMP,
				&__get_cpu_var(mce_poll_banks));
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	n = &__get_cpu_var(mce_next_interval);
	if (mce_notify_irq())
		*n = max(*n/2, HZ/100);
	else
		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));

	t->expires = jiffies + *n;
	add_timer_on(t, smp_processor_id());
}
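
/*
 * Illustrative note (editor's addition, not in the original source):
 * the per-CPU interval adapts between HZ/100 jiffies (10 ms) and
 * check_interval (300 s). Example sequence while the poller keeps
 * finding events:
 *
 *	300s -> 150s -> 75s -> ... -> 10ms	(halved per hit)
 *
 * and it doubles again toward 300 s once the machine is quiet.
 */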
static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	clear_thread_flag(TIF_MCE_NOTIFY);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		wake_up_interruptible(&mce_wait);

		/*
		 * There is no risk of missing notifications because
		 * work_pending is always cleared before the function is
		 * executed.
		 */
		if (mce_helper[0] && !work_pending(&mce_trigger_work))
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __cpuinit __mcheck_cpu_mce_banks_init(void)
{
	int i;

	mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __cpuinit __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!banks)
		printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		printk(KERN_WARNING
		       "MCE: Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(banks != 0 && b != banks);
	banks = b;
	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mce_ser = 1;

	return 0;
}

static void __mcheck_cpu_init_generic(void)
{
	mce_banks_t all_banks;
	u64 cap;
	int i;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);

	set_in_cr4(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 <= 17 && mce_bootlog < 0) {
			/*
			 * Lots of broken BIOSes around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			mce_bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && banks > 0)
			mce_banks[0].ctl = 0;
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */
		if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			monarch_timeout < 0)
			monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
			mce_bootlog = 0;
	}
	if (monarch_timeout < 0)
		monarch_timeout = 0;
	if (mce_bootlog != 0)
		mce_panic_timeout = 30;

	return 0;
}

static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return 0;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		return 1;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		return 1;
	}

	return 0;
}

static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	int *n = &__get_cpu_var(mce_next_interval);

	setup_timer(t, mce_start_timer, smp_processor_id());

	if (mce_ignore_ce)
		return;

	*n = check_interval * HZ;
	if (!*n)
		return;
	t->expires = round_jiffies(jiffies + *n);
	add_timer_on(t, smp_processor_id());
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mce_disabled = 1;
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_timer();
	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
	init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
}

/*
 * Character device to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_state_lock);
static int open_count;	/* #times opened */
static int open_exclu;	/* already open exclusive? */

static int mce_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		open_exclu = 1;
	open_count++;

	spin_unlock(&mce_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_state_lock);

	open_count--;
	open_exclu = 0;

	spin_unlock(&mce_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static int mce_apei_read_done;

/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
	int rc;
	u64 record_id;
	struct mce m;

	if (usize < sizeof(struct mce))
		return -EINVAL;

	rc = apei_read_mce(&m, &record_id);
	/* Error or no more MCE record */
	if (rc <= 0) {
		mce_apei_read_done = 1;
		return rc;
	}
	rc = -EFAULT;
	if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
		return rc;
	/*
	 * In fact, we should have cleared the record after it has
	 * been flushed to disk or sent over the network by
	 * /sbin/mcelog, but we have no interface to support that now,
	 * so just clear it to avoid duplication.
	 */
	rc = apei_clear_mce(record_id);
	if (rc) {
		mce_apei_read_done = 1;
		return rc;
	}
	*ubuf += sizeof(struct mce);

	return 0;
}

static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
			loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_read_mutex);

	if (!mce_apei_read_done) {
		err = __mce_read_apei(&buf, usize);
		if (err || buf != ubuf)
			goto out;
	}

	next = rcu_dereference_check_mce(mcelog.next);

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
		goto out;

	err = 0;
	prev = 0;
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;

			while (!mcelog.entry[i].finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(mcelog.entry + i, 0,
					       sizeof(struct mce));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, mcelog.entry + i,
					    sizeof(struct mce));
			buf += sizeof(struct mce);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		if (mcelog.entry[i].finished &&
		    mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) {
			err |= copy_to_user(buf, mcelog.entry+i,
					    sizeof(struct mce));
			smp_rmb();
			buf += sizeof(struct mce);
			memset(&mcelog.entry[i], 0, sizeof(struct mce));
		}
	}

	if (err)
		err = -EFAULT;

out:
	mutex_unlock(&mce_read_mutex);
	kfree(cpu_tsc);

	return err ? err : buf - ubuf;
}
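
/*
 * Illustrative note (editor's addition, not in the original source):
 * the read side drains the buffer in two passes. First it copies
 * entries [0, next) out, spinning briefly on 'finished' for records
 * still being written, and retries until cmpxchg(&mcelog.next, prev, 0)
 * confirms no new slot was claimed meanwhile. Then, after
 * synchronize_sched(), it sweeps the tail for stragglers whose TSC
 * predates the per-CPU snapshot taken by collect_tscs(), i.e. writers
 * that claimed a slot before the reset of 'next' but finished late.
 */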
static unsigned int mce_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_wait, wait);
	if (rcu_access_index(mcelog.next))
		return POLLIN | POLLRDNORM;
	if (!mce_apei_read_done && apei_check_mce())
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

/* Modified in mce-inject.c, so not static or const */
struct file_operations mce_chrdev_ops = {
	.open			= mce_open,
	.release		= mce_release,
	.read			= mce_read,
	.poll			= mce_poll,
	.unlocked_ioctl		= mce_ioctl,
	.llseek			= no_llseek,
};
EXPORT_SYMBOL_GPL(mce_chrdev_ops);

static struct miscdevice mce_log_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

/*
 * mce=off		Disables machine check
 * mce=no_cmci		Disables CMCI
 * mce=dont_log_ce	Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce	Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog		Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog	Don't log MCEs from before booting.
 */
static int __init mcheck_enable(char *str)
{
	if (*str == 0) {
		enable_p5_mce();
		return 1;
	}
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		mce_disabled = 1;
	else if (!strcmp(str, "no_cmci"))
		mce_cmci_disabled = 1;
	else if (!strcmp(str, "dont_log_ce"))
		mce_dont_log_ce = 1;
	else if (!strcmp(str, "ignore_ce"))
		mce_ignore_ce = 1;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		mce_bootlog = (str[0] == 'b');
	else if (isdigit(str[0])) {
		get_option(&str, &tolerant);
		if (*str == ',') {
			++str;
			get_option(&str, &monarch_timeout);
		}
	} else {
		printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
		       str);
		return 0;
	}
	return 1;
}
__setup("mce", mcheck_enable);
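
/*
 * Illustrative examples (editor's addition, not in the original source):
 *
 *	mce=off		turn machine checks off completely
 *	mce=2,500	tolerant=2, wait 500us for other CPUs on an MCE
 *			(monarch_timeout is in microseconds, see mce_start())
 *	mce=nobootlog	don't log MCEs left over from before boot
 */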
  1443. int __init mcheck_init(void)
  1444. {
  1445. mcheck_intel_therm_init();
  1446. return 0;
  1447. }
  1448. /*
  1449. * Sysfs support
  1450. */
  1451. /*
  1452. * Disable machine checks on suspend and shutdown. We can't really handle
  1453. * them later.
  1454. */
static int mce_disable_error_reporting(void)
{
	int i;

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
	return 0;
}

static int mce_suspend(void)
{
	return mce_disable_error_reporting();
}

static void mce_shutdown(void)
{
	mce_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
}

static struct syscore_ops mce_syscore_ops = {
	.suspend	= mce_suspend,
	.shutdown	= mce_shutdown,
	.resume		= mce_resume,
};

static void mce_cpu_restart(void *data)
{
	del_timer_sync(&__get_cpu_var(mce_timer));
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_ce(void *all)
{
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	if (all)
		del_timer_sync(&__get_cpu_var(mce_timer));
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}

static struct sysdev_class mce_sysclass = {
	.name		= "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, mce_dev);

__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

static inline struct mce_bank *attr_to_bank(struct sysdev_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}

static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	attr_to_bank(attr)->ctl = new;
	mce_restart();

	return size;
}

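/*
 * Illustrative usage (the path assumes the sysdev class name above):
 *
 *	# echo 0 > /sys/devices/system/machinecheck/machinecheck0/bank2
 *
 * would clear the control bits for bank 2; mce_restart() then re-runs the
 * generic init so the new value should be written back to the bank's
 * MSR_IA32_MCx_CTL on every CPU.
 */
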
static ssize_t
show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
{
	strcpy(buf, mce_helper);
	strcat(buf, "\n");
	return strlen(mce_helper) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
			   const char *buf, size_t siz)
{
	char *p;

	strncpy(mce_helper, buf, sizeof(mce_helper));
	mce_helper[sizeof(mce_helper)-1] = 0;
	p = strchr(mce_helper, '\n');

	if (p)
		*p = 0;

	return strlen(mce_helper) + !!p;
}

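/*
 * Illustrative usage (the helper path is hypothetical):
 *
 *	# echo /usr/sbin/mce-notify > \
 *		/sys/devices/system/machinecheck/machinecheck0/trigger
 *
 * The stored program is run when new events are logged; writing an empty
 * string should disable it, since the helper is only invoked when one is set.
 */
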
static ssize_t set_ignore_ce(struct sys_device *s,
			     struct sysdev_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	if (mce_ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			on_each_cpu(mce_disable_ce, (void *)1, 1);
			mce_ignore_ce = 1;
		} else {
			/* enable ce features */
			mce_ignore_ce = 0;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	return size;
}

static ssize_t set_cmci_disabled(struct sys_device *s,
				 struct sysdev_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	if (mce_cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_ce, NULL, 1);
			mce_cmci_disabled = 1;
		} else {
			/* enable cmci */
			mce_cmci_disabled = 0;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	return size;
}

static ssize_t store_int_with_restart(struct sys_device *s,
				      struct sysdev_attribute *attr,
				      const char *buf, size_t size)
{
	ssize_t ret = sysdev_store_int(s, attr, buf, size);

	mce_restart();
	return ret;
}

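/*
 * store_int_with_restart() is wired to check_interval below, so a write
 * takes effect on all CPUs immediately, e.g. (illustrative):
 *
 *	# echo 300 > /sys/devices/system/machinecheck/machinecheck0/check_interval
 *
 * stores the value and then calls mce_restart() to re-arm the per-CPU
 * polling timers with the new interval.
 */
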
static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
static SYSDEV_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);

static struct sysdev_ext_attribute attr_check_interval = {
	_SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
		     store_int_with_restart),
	&check_interval
};

static struct sysdev_ext_attribute attr_ignore_ce = {
	_SYSDEV_ATTR(ignore_ce, 0644, sysdev_show_int, set_ignore_ce),
	&mce_ignore_ce
};

static struct sysdev_ext_attribute attr_cmci_disabled = {
	_SYSDEV_ATTR(cmci_disabled, 0644, sysdev_show_int, set_cmci_disabled),
	&mce_cmci_disabled
};

static struct sysdev_attribute *mce_attrs[] = {
	&attr_tolerant.attr,
	&attr_check_interval.attr,
	&attr_trigger,
	&attr_monarch_timeout.attr,
	&attr_dont_log_ce.attr,
	&attr_ignore_ce.attr,
	&attr_cmci_disabled.attr,
	NULL
};

static cpumask_var_t mce_dev_initialized;

/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_create_device(unsigned int cpu)
{
	struct sys_device *sysdev = &per_cpu(mce_dev, cpu);
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	memset(&sysdev->kobj, 0, sizeof(struct kobject));
	sysdev->id  = cpu;
	sysdev->cls = &mce_sysclass;

	err = sysdev_register(sysdev);
	if (err)
		return err;

	for (i = 0; mce_attrs[i]; i++) {
		err = sysdev_create_file(sysdev, mce_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < banks; j++) {
		err = sysdev_create_file(sysdev, &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_dev_initialized);

	return 0;
error2:
	/* Unwind only the files that were actually created: */
	while (--j >= 0)
		sysdev_remove_file(sysdev, &mce_banks[j].attr);
error:
	while (--i >= 0)
		sysdev_remove_file(sysdev, mce_attrs[i]);

	sysdev_unregister(sysdev);

	return err;
}

static __cpuinit void mce_remove_device(unsigned int cpu)
{
	struct sys_device *sysdev = &per_cpu(mce_dev, cpu);
	int i;

	if (!cpumask_test_cpu(cpu, mce_dev_initialized))
		return;

	for (i = 0; mce_attrs[i]; i++)
		sysdev_remove_file(sysdev, mce_attrs[i]);

	for (i = 0; i < banks; i++)
		sysdev_remove_file(sysdev, &mce_banks[i].attr);

	sysdev_unregister(sysdev);
	cpumask_clear_cpu(cpu, mce_dev_initialized);
}

/* Make sure there are no machine checks on offlined CPUs. */
static void __cpuinit mce_disable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;

	/* Keep CMCI ownership across suspend (TASKS_FROZEN); give it up only on a real unplug: */
	if (!(action & CPU_TASKS_FROZEN))
		cmci_clear();
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
}

static void __cpuinit mce_reenable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_reenable();
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
	}
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_create_device(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_remove_device(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		del_timer_sync(t);
		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!mce_ignore_ce && check_interval) {
			/*
			 * The notifier can run on any CPU, so index the
			 * target cpu's interval explicitly instead of
			 * using __get_cpu_var():
			 */
			t->expires = round_jiffies(jiffies +
					per_cpu(mce_next_interval, cpu));
			add_timer_on(t, cpu);
		}
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		break;
	case CPU_POST_DEAD:
		/* intentionally ignoring frozen here */
		cmci_rediscover(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};

static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];
		struct sysdev_attribute *a = &b->attr;

		sysfs_attr_init(&a->attr);
		a->attr.name	= b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;
	}
}

static __init int mcheck_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	/* With CONFIG_CPUMASK_OFFSTACK this allocation can fail: */
	if (!zalloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL))
		return -ENOMEM;

	mce_init_banks();

	err = sysdev_class_register(&mce_sysclass);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_create_device(i);
		if (err)
			return err;
	}

	register_syscore_ops(&mce_syscore_ops);
	register_hotcpu_notifier(&mce_cpu_notifier);
	misc_register(&mce_log_device);

	return err;
}

device_initcall(mcheck_init_device);

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mce_disabled = 1;
	return 1;
}
__setup("nomce", mcheck_disable);

#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);

	return dmce;
}

static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_paniced, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");

static int __init mcheck_debugfs_init(void)
{
	struct dentry *dmce, *ffake_panic;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		return -ENOMEM;
	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
					  &fake_panic_fops);
	if (!ffake_panic)
		return -ENOMEM;

	return 0;
}

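/*
 * Illustrative usage: with debugfs mounted at /sys/kernel/debug,
 *
 *	# echo 1 > /sys/kernel/debug/mce/fake_panic
 *
 * makes subsequent MCE panics log instead of really panicking (root can
 * write despite the 0444 mode), which is handy together with mce-inject.
 */
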
late_initcall(mcheck_debugfs_init);
#endif