/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/edac_mce.h>
#include <linux/irq_work.h>

#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);
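/*
 * mcelog.next is dereferenced both from the machine check path (under
 * rcu_read_lock_sched()) and from the /dev/mcelog reader (under
 * mce_chrdev_read_mutex), so the lockdep check below accepts either
 * as protection.
 */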
#define rcu_dereference_check_mce(p) \
	rcu_dereference_index_check((p), \
			      rcu_read_lock_sched_held() || \
			      lockdep_is_held(&mce_chrdev_read_mutex))

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

int mce_disabled __read_mostly;

#define MISC_MCELOG_MINOR	227

#define SPINUNIT 100	/* 100ns */

atomic_t mce_entry;

DEFINE_PER_CPU(unsigned, mce_exception_count);

/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant __read_mostly = 1;
static int banks __read_mostly;
static int rip_msr __read_mostly;
static int mce_bootlog __read_mostly = -1;
static int monarch_timeout __read_mostly = -1;
static int mce_panic_timeout __read_mostly;
static int mce_dont_log_ce __read_mostly;
int mce_cmci_disabled __read_mostly;
int mce_ignore_ce __read_mostly;
int mce_ser __read_mostly;

struct mce_bank *mce_banks __read_mostly;

/* User mode helper program triggered by machine check event */
static unsigned long mce_need_notify;
static char mce_helper[128];
static char *mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);

/* MCA banks polled by the periodic polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static DEFINE_PER_CPU(struct work_struct, mce_work);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	rdtscll(m->tsc);
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
#ifdef CONFIG_SMP
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
#endif
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */
static struct mce_log mcelog = {
	.signature = MCE_LOG_SIGNATURE,
	.len = MCE_LOG_LEN,
	.recordlen = sizeof(struct mce),
};
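/*
 * Writer protocol, roughly: race on cmpxchg() of mcelog.next to
 * reserve a free slot, fill the record in, then set ->finished as the
 * publish flag. Readers must wait for ->finished before trusting an
 * entry's contents.
 */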
void mce_log(struct mce *mce)
{
	unsigned next, entry;

	/* Emit the trace record: */
	trace_mce_record(mce);

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {
			/*
			 * If edac_mce is enabled, it will check the error type
			 * and will process it, if it is a known error.
			 * Otherwise, the error will be sent through the
			 * mcelog interface.
			 */
			if (edac_mce_parse(mce))
				return;

			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}

static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
	       m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid);

	/*
	 * Print out human-readable details about the MCE error
	 * (if the CPU has an implementation for that):
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_paniced;

static int fake_panic;
static atomic_t mce_fake_paniced;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT * USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mce_panic_timeout;
	panic("Panicking machine check CPU died");
}
static void mce_panic(char *msg, struct mce *final, char *exp)
{
	int i, apei_err = 0;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_paniced) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_paniced) > 1)
			return;
	}
	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce))) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mce_panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */
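/*
 * When injectm.finished is set on this CPU, the mce_rdmsrl()/mce_wrmsrl()
 * wrappers below are redirected to the matching fields of the per-CPU
 * injectm record instead of touching real MSRs. msr_to_offset() maps an
 * MSR number to the corresponding struct mce field.
 */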
static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MCx_STATUS(bank))
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MCx_ADDR(bank))
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MCx_MISC(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;
		}

		/* Use accurate RIP reporting if available. */
		if (rip_msr)
			m->ip = mce_rdmsrl(rip_msr);
	}
}

/*
 * Simple lockless ring to communicate PFNs from the exception handler with the
 * process context work function. This is vastly simplified because there's
 * only a single reader and a single writer.
 */
#define MCE_RING_SIZE 16	/* we use one entry less */

struct mce_ring {
	unsigned short start;
	unsigned short end;
	unsigned long ring[MCE_RING_SIZE];
};
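/*
 * One slot is always kept unused so that start == end unambiguously
 * means "empty"; a completely full ring would otherwise be
 * indistinguishable from an empty one.
 */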
static DEFINE_PER_CPU(struct mce_ring, mce_ring);

/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);

	return r->start == r->end;
}

static int mce_ring_get(unsigned long *pfn)
{
	struct mce_ring *r;
	int ret = 0;

	*pfn = 0;
	get_cpu();
	r = &__get_cpu_var(mce_ring);
	if (r->start == r->end)
		goto out;
	*pfn = r->ring[r->start];
	r->start = (r->start + 1) % MCE_RING_SIZE;
	ret = 1;
out:
	put_cpu();
	return ret;
}

/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);
	unsigned next;

	next = (r->end + 1) % MCE_RING_SIZE;
	if (next == r->start)
		return -1;
	r->ring[r->end] = pfn;
	wmb();
	r->end = next;
	return 0;
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_ring_empty()) {
		struct work_struct *work = &__get_cpu_var(mce_work);

		if (!work_pending(work))
			schedule_work(work);
	}
}

DEFINE_PER_CPU(struct irq_work, mce_irq_work);

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps).
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&__get_cpu_var(mce_irq_work));
}

DEFINE_PER_CPU(unsigned, mce_poll_count);
/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled errors
 * here. However, this would be quite problematic -- we would need to
 * reimplement the Monarch handling and it would mess up the exclusion
 * between the exception handler and the poll handler -- so we skip
 * this for now.
 * These cases should not happen anyway, or only when the CPU is
 * already totally confused. In that case it's likely it would not
 * fully execute the machine check handler either.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce m;
	int i;

	percpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	for (i = 0; i < banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		if (m.status & MCI_STATUS_MISCV)
			m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
		if (m.status & MCI_STATUS_ADDRV)
			m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) {
			mce_log(&m);
			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &m);
		}

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg)
{
	int i;

	for (i = 0; i < banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
			return 1;
	}
	return 0;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing is equal to its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_paniced))
		wait_for_panic();
	if (!monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		/* CHECKME: Make panic default for 1 too? */
		if (tolerant < 1)
			mce_panic("Timeout synchronizing machine check over CPUs",
				  NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}
/*
 * The Monarch's reign. The Monarch is the CPU that entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable
 * case and also make sure all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a
 * semi-stable state and won't corrupt anything by itself. It's OK to let
 * the others continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
					    &nmsg);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
		mce_panic("Fatal Machine check", m, msg);

	/*
	 * For a UC error somewhere we let the CPU that detects it handle it.
	 * We must also let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
		mce_panic("Machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it lets the CPUs run
 * their scanning loops one by one, in the entry order.
 * TBD double check parallel CPU hotunplug
 */
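/*
 * For example (sketch) with three CPUs: they call in with order 1, 2, 3.
 * Order 1 becomes Monarch and sets mce_executing to 1, so it scans
 * first. Each CPU's mce_end() then increments mce_executing, which
 * releases the Subject with the next callin order, so the bank scans
 * run strictly one CPU at a time.
 */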
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * global_nwo should be updated before mce_callin
	 */
	smp_wmb();
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout)) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout)) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}
/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;
	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;
	return 1;
}
static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 */
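/*
 * Rough flow: gather global state, precompute no_way_out, rendezvous
 * with the other CPUs via mce_start(), scan all banks and grade each
 * event's severity, queue action-optional pages for memory_failure(),
 * rendezvous again via mce_end(), then panic, signal or continue
 * depending on the worst severity and the tolerant setting.
 */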
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, we cannot return to the interrupted context;
	 * the best we can do short of panicking is to kill the current
	 * process with SIGBUS.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	char *msg = "Unknown";

	atomic_inc(&mce_entry);

	percpu_inc(mce_exception_count);

	if (notify_die(DIE_NMI, "machine check", regs, error_code,
			   18, SIGKILL) == NOTIFY_STOP)
		goto out;
	if (!banks)
		goto out;

	mce_gather_info(&m, regs);

	final = &__get_cpu_var(mces_seen);
	*final = m;

	no_way_out = mce_no_way_out(&m, &msg);

	barrier();

	/*
	 * When there is no restart IP we must always kill or panic.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Go through all the banks in exclusion of the other CPUs.
	 * This way we don't report duplicated events on shared banks
	 * because the first one to see it will clear it.
	 */
	order = mce_start(&no_way_out);
	for (i = 0; i < banks; i++) {
		__clear_bit(i, toclear);
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Non uncorrected or non signaled errors are handled by
		 * machine_check_poll. Leave them alone, unless this panics.
		 */
		if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
		    !no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK);

		severity = mce_severity(&m, tolerant, NULL);

		/*
		 * When machine check was for corrected handler don't touch,
		 * unless we're panicking.
		 */
		if (severity == MCE_KEEP_SEVERITY && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		/*
		 * Kill on action required.
		 */
		if (severity == MCE_AR_SEVERITY)
			kill_it = 1;

		if (m.status & MCI_STATUS_MISCV)
			m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
		if (m.status & MCI_STATUS_ADDRV)
			m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		/*
		 * Action optional error. Queue address for later processing.
		 * When the ring overflows we just ignore the AO error.
		 * RED-PEN add some logging mechanism when
		 * usable_address or mce_ring_add fails.
		 * RED-PEN don't ignore overflow for tolerant == 0
		 */
		if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
			mce_ring_add(m.addr >> PAGE_SHIFT);

		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (mce_end(order) < 0)
		no_way_out = worst >= MCE_PANIC_SEVERITY;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 *
	 * This is mainly used in the case when the system doesn't
	 * support MCE broadcasting or it has been disabled.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Fatal machine check on current CPU", final, msg);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done. Try to kill as little as possible. If we can kill just
	 * one task, do that. If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3)
		force_sig(SIGBUS, current);

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	atomic_dec(&mce_entry);
	sync_core();
}
EXPORT_SYMBOL_GPL(do_machine_check);

/* dummy to break dependency. actual code is in mm/memory-failure.c */
void __attribute__((weak)) memory_failure(unsigned long pfn, int vector)
{
	printk(KERN_ERR "Action optional memory failure at %lx ignored\n", pfn);
}
/*
 * Called after mce notification in process context. This code
 * is allowed to sleep. Call the high level VM handler to process
 * any corrupted pages.
 * Assume that the work queue code only calls this one at a time
 * per CPU.
 * Note we don't disable preemption, so this code might run on the wrong
 * CPU. In this case the event is picked up by the scheduled work queue.
 * This is merely a fast path to expedite processing in some common
 * cases.
 */
void mce_notify_process(void)
{
	unsigned long pfn;

	mce_notify_irq();
	while (mce_ring_get(&pfn))
		memory_failure(pfn, MCE_VECTOR);
}

static void mce_process_work(struct work_struct *dummy)
{
	mce_notify_process();
}

#ifdef CONFIG_X86_MCE_INTEL
/***
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
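/*
 * Example with HZ=1000: the interval is halved on each logged event,
 * down to HZ/100 (10 ms), and doubled when polls come up empty, up to
 * check_interval seconds (5 minutes by default).
 */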
static int check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mce_start_timer(unsigned long data)
{
	struct timer_list *t = &per_cpu(mce_timer, data);
	int *n;

	WARN_ON(smp_processor_id() != data);

	if (mce_available(__this_cpu_ptr(&cpu_info))) {
		machine_check_poll(MCP_TIMESTAMP,
				&__get_cpu_var(mce_poll_banks));
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	n = &__get_cpu_var(mce_next_interval);
	if (mce_notify_irq())
		*n = max(*n/2, HZ/100);
	else
		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));

	t->expires = jiffies + *n;
	add_timer_on(t, smp_processor_id());
}

/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		del_timer_sync(&per_cpu(mce_timer, cpu));
}

static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	clear_thread_flag(TIF_MCE_NOTIFY);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		/* wake processes polling /dev/mcelog */
		wake_up_interruptible(&mce_chrdev_wait);

		/*
		 * There is no risk of missing notifications because
		 * work_pending is always cleared before the function is
		 * executed.
		 */
		if (mce_helper[0] && !work_pending(&mce_trigger_work))
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __cpuinit __mcheck_cpu_mce_banks_init(void)
{
	int i;

	mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __cpuinit __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!banks)
		printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		printk(KERN_WARNING
		       "MCE: Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(banks != 0 && b != banks);
	banks = b;

	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mce_ser = 1;

	return 0;
}

static void __mcheck_cpu_init_generic(void)
{
	mce_banks_t all_banks;
	u64 cap;
	int i;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);

	set_in_cr4(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}
/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 <= 17 && mce_bootlog < 0) {
			/*
			 * Lots of broken BIOSes around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			mce_bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && banks > 0)
			mce_banks[0].ctl = 0;
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+.
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */
		if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			monarch_timeout < 0)
			monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
			mce_bootlog = 0;
	}
	if (monarch_timeout < 0)
		monarch_timeout = 0;
	if (mce_bootlog != 0)
		mce_panic_timeout = 30;

	return 0;
}
static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return 0;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		return 1;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		return 1;
	}

	return 0;
}
static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	int *n = &__get_cpu_var(mce_next_interval);

	setup_timer(t, mce_start_timer, smp_processor_id());

	if (mce_ignore_ce)
		return;

	*n = check_interval * HZ;
	if (!*n)
		return;
	t->expires = round_jiffies(jiffies + *n);
	add_timer_on(t, smp_processor_id());
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mce_disabled = 1;
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_timer();
	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
	init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
}

/*
 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_chrdev_state_lock);
static int mce_chrdev_open_count;	/* #times opened */
static int mce_chrdev_open_exclu;	/* already open exclusive? */

static int mce_chrdev_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	if (mce_chrdev_open_exclu ||
	    (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_chrdev_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		mce_chrdev_open_exclu = 1;
	mce_chrdev_open_count++;

	spin_unlock(&mce_chrdev_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_chrdev_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	mce_chrdev_open_count--;
	mce_chrdev_open_exclu = 0;

	spin_unlock(&mce_chrdev_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static int mce_apei_read_done;

/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
	int rc;
	u64 record_id;
	struct mce m;

	if (usize < sizeof(struct mce))
		return -EINVAL;

	rc = apei_read_mce(&m, &record_id);
	/* Error or no more MCE record */
	if (rc <= 0) {
		mce_apei_read_done = 1;
		return rc;
	}
	rc = -EFAULT;
	if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
		return rc;
	/*
	 * Ideally the record should be cleared only after it has been
	 * flushed to disk or sent over the network by /sbin/mcelog, but
	 * there is no interface to support that yet, so just clear it
	 * here to avoid duplicates.
	 */
	rc = apei_clear_mce(record_id);
	if (rc) {
		mce_apei_read_done = 1;
		return rc;
	}
	*ubuf += sizeof(struct mce);

	return 0;
}
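/*
 * Reader side of the lockless log, roughly: copy out finished records
 * in [0, next), reset mcelog.next with cmpxchg() and repeat until no
 * new records raced in, then use a per-CPU TSC snapshot to pick up
 * stragglers that were still being written while the index was reset.
 */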
static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
				size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_chrdev_read_mutex);

	if (!mce_apei_read_done) {
		err = __mce_read_apei(&buf, usize);
		if (err || buf != ubuf)
			goto out;
	}

	next = rcu_dereference_check_mce(mcelog.next);

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
		goto out;

	err = 0;
	prev = 0;
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			struct mce *m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(m, 0, sizeof(*m));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, m, sizeof(*m));
			buf += sizeof(*m);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
			err |= copy_to_user(buf, m, sizeof(*m));
			smp_rmb();
			buf += sizeof(*m);
			memset(m, 0, sizeof(*m));
		}
	}

	if (err)
		err = -EFAULT;

out:
	mutex_unlock(&mce_chrdev_read_mutex);
	kfree(cpu_tsc);

	return err ? err : buf - ubuf;
}

static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_chrdev_wait, wait);
	if (rcu_access_index(mcelog.next))
		return POLLIN | POLLRDNORM;
	if (!mce_apei_read_done && apei_check_mce())
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
				unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

/* Modified in mce-inject.c, so not static or const */
struct file_operations mce_chrdev_ops = {
	.open = mce_chrdev_open,
	.release = mce_chrdev_release,
	.read = mce_chrdev_read,
	.poll = mce_chrdev_poll,
	.unlocked_ioctl = mce_chrdev_ioctl,
	.llseek = no_llseek,
};
EXPORT_SYMBOL_GPL(mce_chrdev_ops);

static struct miscdevice mce_chrdev_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

/*
 * mce=off		Disables machine check
 * mce=no_cmci		Disables CMCI
 * mce=dont_log_ce	Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce	Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog		Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog	Don't log MCEs from before booting.
 */
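/*
 * Example (hypothetical values): booting with "mce=2,500000" sets
 * tolerant=2 and monarch_timeout=500000 usec, i.e. wait up to half a
 * second for the other CPUs during the rendezvous.
 */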
  1419. static int __init mcheck_enable(char *str)
  1420. {
  1421. if (*str == 0) {
  1422. enable_p5_mce();
  1423. return 1;
  1424. }
  1425. if (*str == '=')
  1426. str++;
  1427. if (!strcmp(str, "off"))
  1428. mce_disabled = 1;
  1429. else if (!strcmp(str, "no_cmci"))
  1430. mce_cmci_disabled = 1;
  1431. else if (!strcmp(str, "dont_log_ce"))
  1432. mce_dont_log_ce = 1;
  1433. else if (!strcmp(str, "ignore_ce"))
  1434. mce_ignore_ce = 1;
  1435. else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
  1436. mce_bootlog = (str[0] == 'b');
  1437. else if (isdigit(str[0])) {
  1438. get_option(&str, &tolerant);
  1439. if (*str == ',') {
  1440. ++str;
  1441. get_option(&str, &monarch_timeout);
  1442. }
  1443. } else {
  1444. printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
  1445. str);
  1446. return 0;
  1447. }
  1448. return 1;
  1449. }
  1450. __setup("mce", mcheck_enable);

int __init mcheck_init(void)
{
	mcheck_intel_therm_init();

	return 0;
}

/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable_error_reporting(void)
{
	int i;

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
	return 0;
}

static int mce_syscore_suspend(void)
{
	return mce_disable_error_reporting();
}

static void mce_syscore_shutdown(void)
{
	mce_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
}

static struct syscore_ops mce_syscore_ops = {
	.suspend	= mce_syscore_suspend,
	.shutdown	= mce_syscore_shutdown,
	.resume		= mce_syscore_resume,
};

/*
 * mce_sysdev: Sysfs support
 */

static void mce_cpu_restart(void *data)
{
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
	mce_timer_delete_all();
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	cmci_reenable();
	cmci_recheck();
	if (all)
		__mcheck_cpu_init_timer();
}

static struct sysdev_class mce_sysdev_class = {
	.name		= "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, mce_sysdev);

__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

static inline struct mce_bank *attr_to_bank(struct sysdev_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}

static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	attr_to_bank(attr)->ctl = new;
	mce_restart();

	return size;
}

static ssize_t
show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
{
	strcpy(buf, mce_helper);
	strcat(buf, "\n");
	return strlen(mce_helper) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
				const char *buf, size_t siz)
{
	char *p;

	strncpy(mce_helper, buf, sizeof(mce_helper));
	mce_helper[sizeof(mce_helper)-1] = 0;
	p = strchr(mce_helper, '\n');

	if (p)
		*p = 0;

	return strlen(mce_helper) + !!p;
}
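
/*
 * Usage sketch (the sysfs path is an assumption based on the
 * "machinecheck" class registered above; mce-helper is a made-up name):
 *
 *	echo /usr/sbin/mce-helper > \
 *		/sys/devices/system/machinecheck/machinecheck0/trigger
 *
 * The stored program is run as the usermode helper when an event is
 * logged (see the trigger handling earlier in this file).
 */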

static ssize_t set_ignore_ce(struct sys_device *s,
			     struct sysdev_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	if (mce_ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			mce_timer_delete_all();
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mce_ignore_ce = 1;
		} else {
			/* enable ce features */
			mce_ignore_ce = 0;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	return size;
}

static ssize_t set_cmci_disabled(struct sys_device *s,
				 struct sysdev_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	if (mce_cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mce_cmci_disabled = 1;
		} else {
			/* enable cmci */
			mce_cmci_disabled = 0;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	return size;
}
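
/*
 * Example (illustrative; machinecheck0 stands in for any CPU's node):
 *
 *	echo 1 > /sys/devices/system/machinecheck/machinecheck0/cmci_disabled
 *
 * turns CMCI off on all CPUs and leaves corrected errors to the
 * periodic poller.
 */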

static ssize_t store_int_with_restart(struct sys_device *s,
				      struct sysdev_attribute *attr,
				      const char *buf, size_t size)
{
	ssize_t ret = sysdev_store_int(s, attr, buf, size);

	mce_restart();
	return ret;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
static SYSDEV_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);

static struct sysdev_ext_attribute attr_check_interval = {
	_SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
		     store_int_with_restart),
	&check_interval
};

static struct sysdev_ext_attribute attr_ignore_ce = {
	_SYSDEV_ATTR(ignore_ce, 0644, sysdev_show_int, set_ignore_ce),
	&mce_ignore_ce
};

static struct sysdev_ext_attribute attr_cmci_disabled = {
	_SYSDEV_ATTR(cmci_disabled, 0644, sysdev_show_int, set_cmci_disabled),
	&mce_cmci_disabled
};

static struct sysdev_attribute *mce_sysdev_attrs[] = {
	&attr_tolerant.attr,
	&attr_check_interval.attr,
	&attr_trigger,
	&attr_monarch_timeout.attr,
	&attr_dont_log_ce.attr,
	&attr_ignore_ce.attr,
	&attr_cmci_disabled.attr,
	NULL
};
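
/*
 * Each entry above becomes one file per CPU, e.g. (path assumed from the
 * sysdev class name):
 *
 *	/sys/devices/system/machinecheck/machinecheckN/tolerant
 *	/sys/devices/system/machinecheck/machinecheckN/check_interval
 *	/sys/devices/system/machinecheck/machinecheckN/trigger
 *
 * plus the per-bank bankM files set up in mce_init_banks() below.
 */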

static cpumask_var_t mce_sysdev_initialized;

/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_sysdev_create(unsigned int cpu)
{
	struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	memset(&sysdev->kobj, 0, sizeof(struct kobject));
	sysdev->id  = cpu;
	sysdev->cls = &mce_sysdev_class;

	err = sysdev_register(sysdev);
	if (err)
		return err;

	for (i = 0; mce_sysdev_attrs[i]; i++) {
		err = sysdev_create_file(sysdev, mce_sysdev_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < banks; j++) {
		err = sysdev_create_file(sysdev, &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_sysdev_initialized);

	return 0;
error2:
	while (--j >= 0)
		sysdev_remove_file(sysdev, &mce_banks[j].attr);
error:
	while (--i >= 0)
		sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);

	sysdev_unregister(sysdev);

	return err;
}

static __cpuinit void mce_sysdev_remove(unsigned int cpu)
{
	struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
	int i;

	if (!cpumask_test_cpu(cpu, mce_sysdev_initialized))
		return;

	for (i = 0; mce_sysdev_attrs[i]; i++)
		sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);

	for (i = 0; i < banks; i++)
		sysdev_remove_file(sysdev, &mce_banks[i].attr);

	sysdev_unregister(sysdev);
	cpumask_clear_cpu(cpu, mce_sysdev_initialized);
}

/* Make sure there are no machine checks on offlined CPUs. */
static void __cpuinit mce_disable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_clear();
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
}

static void __cpuinit mce_reenable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_reenable();
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
	}
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_sysdev_create(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_sysdev_remove(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		del_timer_sync(t);
		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!mce_ignore_ce && check_interval) {
			t->expires = round_jiffies(jiffies +
					__get_cpu_var(mce_next_interval));
			add_timer_on(t, cpu);
		}
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		break;
	case CPU_POST_DEAD:
		/* intentionally ignoring frozen here */
		cmci_rediscover(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};

static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];
		struct sysdev_attribute *a = &b->attr;

		sysfs_attr_init(&a->attr);
		a->attr.name	= b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;
	}
}

static __init int mcheck_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	zalloc_cpumask_var(&mce_sysdev_initialized, GFP_KERNEL);

	mce_init_banks();

	err = sysdev_class_register(&mce_sysdev_class);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_sysdev_create(i);
		if (err)
			return err;
	}

	register_syscore_ops(&mce_syscore_ops);
	register_hotcpu_notifier(&mce_cpu_notifier);

	/* register character device /dev/mcelog; don't ignore failure */
	err = misc_register(&mce_chrdev_device);

	return err;
}

device_initcall(mcheck_init_device);

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mce_disabled = 1;
	return 1;
}
__setup("nomce", mcheck_disable);

#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);

	return dmce;
}

static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_paniced, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");

static int __init mcheck_debugfs_init(void)
{
	struct dentry *dmce, *ffake_panic;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		return -ENOMEM;
	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
					  &fake_panic_fops);
	if (!ffake_panic)
		return -ENOMEM;

	return 0;
}
late_initcall(mcheck_debugfs_init);
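
/*
 * Quick check (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/mce/fake_panic
 *
 * prints the current value via fake_panic_get(). Note the file is
 * created read-only (0444) above.
 */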
#endif