/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);

#define rcu_dereference_check_mce(p) \
        rcu_dereference_index_check((p), \
                                    rcu_read_lock_sched_held() || \
                                    lockdep_is_held(&mce_chrdev_read_mutex))
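
/*
 * mcelog.next is read both from timer/interrupt context (under
 * rcu_read_lock_sched()) and from the /dev/mcelog reader (under
 * mce_chrdev_read_mutex); the macro above documents both legal
 * cases for lockdep.
 */
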
#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

int mce_disabled __read_mostly;

#define MISC_MCELOG_MINOR       227

#define SPINUNIT 100    /* 100ns */

atomic_t mce_entry;

DEFINE_PER_CPU(unsigned, mce_exception_count);

/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant __read_mostly = 1;
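
/*
 * Note: tolerant is also runtime-tunable; in kernels of this vintage it
 * is exposed as a sysfs attribute, conventionally
 * /sys/devices/system/machinecheck/machinecheck<N>/tolerant, so e.g.
 * "echo 2 > .../machinecheck0/tolerant" selects level 2 without a
 * reboot. (The exact path is an assumption; it follows from the
 * "machinecheck" sysdev class registered below.)
 */
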
static int banks __read_mostly;
static int rip_msr __read_mostly;
static int mce_bootlog __read_mostly = -1;
static int monarch_timeout __read_mostly = -1;
static int mce_panic_timeout __read_mostly;
static int mce_dont_log_ce __read_mostly;
int mce_cmci_disabled __read_mostly;
int mce_ignore_ce __read_mostly;
int mce_ser __read_mostly;

struct mce_bank *mce_banks __read_mostly;

/* User mode helper program triggered by machine check event */
static unsigned long mce_need_notify;
static char mce_helper[128];
static char *mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;

/* MCA banks polled by the periodic polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
        [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static DEFINE_PER_CPU(struct work_struct, mce_work);

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
        memset(m, 0, sizeof(struct mce));
        m->cpu = m->extcpu = smp_processor_id();
        rdtscll(m->tsc);
        /* We hope get_seconds stays lockless */
        m->time = get_seconds();
        m->cpuvendor = boot_cpu_data.x86_vendor;
        m->cpuid = cpuid_eax(1);
#ifdef CONFIG_SMP
        m->socketid = cpu_data(m->extcpu).phys_proc_id;
#endif
        m->apicid = cpu_data(m->extcpu).initial_apicid;
        rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}
DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */
static struct mce_log mcelog = {
        .signature      = MCE_LOG_SIGNATURE,
        .len            = MCE_LOG_LEN,
        .recordlen      = sizeof(struct mce),
};
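
/*
 * mce_log() below implements a lockless multi-writer reservation scheme:
 * a writer claims slot "entry" by advancing mcelog.next with cmpxchg(),
 * copies its record in, and only then sets ->finished. Readers treat
 * ->finished == 0 as "still being written" and skip or retry, so a
 * half-copied record is never consumed.
 */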
void mce_log(struct mce *mce)
{
        unsigned next, entry;
        int ret = 0;

        /* Emit the trace record: */
        trace_mce_record(mce);

        ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
        if (ret == NOTIFY_STOP)
                return;

        mce->finished = 0;
        wmb();
        for (;;) {
                entry = rcu_dereference_check_mce(mcelog.next);
                for (;;) {
                        /*
                         * When the buffer fills up discard new entries.
                         * Assume that the earlier errors are the more
                         * interesting ones:
                         */
                        if (entry >= MCE_LOG_LEN) {
                                set_bit(MCE_OVERFLOW,
                                        (unsigned long *)&mcelog.flags);
                                return;
                        }
                        /* Old left over entry. Skip: */
                        if (mcelog.entry[entry].finished) {
                                entry++;
                                continue;
                        }
                        break;
                }
                smp_rmb();
                next = entry + 1;
                if (cmpxchg(&mcelog.next, entry, next) == entry)
                        break;
        }
        memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
        wmb();
        mcelog.entry[entry].finished = 1;
        wmb();

        mce->finished = 1;
        set_bit(0, &mce_need_notify);
}
void mce_register_decode_chain(struct notifier_block *nb)
{
        atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

void mce_unregister_decode_chain(struct notifier_block *nb)
{
        atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

static void print_mce(struct mce *m)
{
        int ret = 0;

        pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
                 m->extcpu, m->mcgstatus, m->bank, m->status);

        if (m->ip) {
                pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
                         !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
                         m->cs, m->ip);

                if (m->cs == __KERNEL_CS)
                        print_symbol("{%s}", m->ip);
                pr_cont("\n");
        }

        pr_emerg(HW_ERR "TSC %llx ", m->tsc);
        if (m->addr)
                pr_cont("ADDR %llx ", m->addr);
        if (m->misc)
                pr_cont("MISC %llx ", m->misc);

        pr_cont("\n");
        /*
         * Note this output is parsed by external tools and old fields
         * should not be changed.
         */
        pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
                 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
                 cpu_data(m->extcpu).microcode);

        /*
         * Print out human-readable details about the MCE error,
         * (if the CPU has an implementation for that)
         */
        ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
        if (ret == NOTIFY_STOP)
                return;

        pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}
#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_paniced;

static int fake_panic;
static atomic_t mce_fake_paniced;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
        long timeout = PANIC_TIMEOUT * USEC_PER_SEC;

        preempt_disable();
        local_irq_enable();
        while (timeout-- > 0)
                udelay(1);
        if (panic_timeout == 0)
                panic_timeout = mce_panic_timeout;
        panic("Panicking machine check CPU died");
}
static void mce_panic(char *msg, struct mce *final, char *exp)
{
        int i, apei_err = 0;

        if (!fake_panic) {
                /*
                 * Make sure only one CPU runs in machine check panic
                 */
                if (atomic_inc_return(&mce_paniced) > 1)
                        wait_for_panic();
                barrier();

                bust_spinlocks(1);
                console_verbose();
        } else {
                /* Don't log too much for fake panic */
                if (atomic_inc_return(&mce_fake_paniced) > 1)
                        return;
        }
        /* First print corrected ones that are still unlogged */
        for (i = 0; i < MCE_LOG_LEN; i++) {
                struct mce *m = &mcelog.entry[i];

                if (!(m->status & MCI_STATUS_VAL))
                        continue;
                if (!(m->status & MCI_STATUS_UC)) {
                        print_mce(m);
                        if (!apei_err)
                                apei_err = apei_write_mce(m);
                }
        }
        /* Now print uncorrected but with the final one last */
        for (i = 0; i < MCE_LOG_LEN; i++) {
                struct mce *m = &mcelog.entry[i];

                if (!(m->status & MCI_STATUS_VAL))
                        continue;
                if (!(m->status & MCI_STATUS_UC))
                        continue;
                if (!final || memcmp(m, final, sizeof(struct mce))) {
                        print_mce(m);
                        if (!apei_err)
                                apei_err = apei_write_mce(m);
                }
        }
        if (final) {
                print_mce(final);
                if (!apei_err)
                        apei_err = apei_write_mce(final);
        }
        if (cpu_missing)
                pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
        if (exp)
                pr_emerg(HW_ERR "Machine check: %s\n", exp);
        if (!fake_panic) {
                if (panic_timeout == 0)
                        panic_timeout = mce_panic_timeout;
                panic(msg);
        } else
                pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}
/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
        unsigned bank = __this_cpu_read(injectm.bank);

        if (msr == rip_msr)
                return offsetof(struct mce, ip);
        if (msr == MSR_IA32_MCx_STATUS(bank))
                return offsetof(struct mce, status);
        if (msr == MSR_IA32_MCx_ADDR(bank))
                return offsetof(struct mce, addr);
        if (msr == MSR_IA32_MCx_MISC(bank))
                return offsetof(struct mce, misc);
        if (msr == MSR_IA32_MCG_STATUS)
                return offsetof(struct mce, mcgstatus);
        return -1;
}
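
/*
 * With injectm.finished set on this CPU, the wrappers below redirect
 * MSR reads/writes into the per-CPU injectm record using the offset
 * mapping above, so software-injected errors (e.g. from an mce-inject
 * style facility, which lives outside this file) flow through the same
 * code paths as real hardware events.
 */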
/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
        u64 v;

        if (__this_cpu_read(injectm.finished)) {
                int offset = msr_to_offset(msr);

                if (offset < 0)
                        return 0;
                return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
        }

        if (rdmsrl_safe(msr, &v)) {
                WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
                /*
                 * Return zero in case the access faulted. This should
                 * not happen normally but can happen if the CPU does
                 * something weird, or if the code is buggy.
                 */
                v = 0;
        }

        return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
        if (__this_cpu_read(injectm.finished)) {
                int offset = msr_to_offset(msr);

                if (offset >= 0)
                        *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
                return;
        }
        wrmsrl(msr, v);
}
/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
        mce_setup(m);

        m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
        if (regs) {
                /*
                 * Get the address of the instruction at the time of
                 * the machine check error.
                 */
                if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
                        m->ip = regs->ip;
                        m->cs = regs->cs;
                }
                /* Use accurate RIP reporting if available. */
                if (rip_msr)
                        m->ip = mce_rdmsrl(rip_msr);
        }
}
/*
 * Simple lockless ring to communicate PFNs from the exception handler to the
 * process context work function. This is vastly simplified because there's
 * only a single reader and a single writer.
 */
#define MCE_RING_SIZE 16        /* we use one entry less */

struct mce_ring {
        unsigned short start;
        unsigned short end;
        unsigned long ring[MCE_RING_SIZE];
};
static DEFINE_PER_CPU(struct mce_ring, mce_ring);
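
/*
 * Ring invariants: empty when start == end, full when
 * (end + 1) % MCE_RING_SIZE == start -- hence one of the MCE_RING_SIZE
 * slots is sacrificed ("one entry less" above). With a single producer
 * (MCE context) and a single consumer (workqueue), no locking is needed.
 */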
/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
        struct mce_ring *r = &__get_cpu_var(mce_ring);

        return r->start == r->end;
}

static int mce_ring_get(unsigned long *pfn)
{
        struct mce_ring *r;
        int ret = 0;

        *pfn = 0;
        get_cpu();
        r = &__get_cpu_var(mce_ring);
        if (r->start == r->end)
                goto out;
        *pfn = r->ring[r->start];
        r->start = (r->start + 1) % MCE_RING_SIZE;
        ret = 1;
out:
        put_cpu();
        return ret;
}

/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
        struct mce_ring *r = &__get_cpu_var(mce_ring);
        unsigned next;

        next = (r->end + 1) % MCE_RING_SIZE;
        if (next == r->start)
                return -1;
        r->ring[r->end] = pfn;
        wmb();
        r->end = next;
        return 0;
}
int mce_available(struct cpuinfo_x86 *c)
{
        if (mce_disabled)
                return 0;
        return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
        if (!mce_ring_empty()) {
                struct work_struct *work = &__get_cpu_var(mce_work);

                if (!work_pending(work))
                        schedule_work(work);
        }
}

DEFINE_PER_CPU(struct irq_work, mce_irq_work);

static void mce_irq_work_cb(struct irq_work *entry)
{
        mce_notify_irq();
        mce_schedule_work();
}
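
/*
 * Report an event from the handler: if the interrupted context had
 * interrupts enabled (or was vm86), notifying directly is safe enough;
 * otherwise defer via irq_work, which raises a self-IPI and runs
 * mce_irq_work_cb() above once interrupts are re-enabled.
 */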
static void mce_report_event(struct pt_regs *regs)
{
        if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
                mce_notify_irq();
                /*
                 * Triggering the work queue here is just an insurance
                 * policy in case the syscall exit notify handler
                 * doesn't run soon enough or ends up running on the
                 * wrong CPU (can happen when audit sleeps)
                 */
                mce_schedule_work();
                return;
        }

        irq_work_queue(&__get_cpu_var(mce_irq_work));
}
DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between the exception handler
 * and the poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In this case it's likely it will
 * not fully execute the machine check handler either.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
        struct mce m;
        int i;

        percpu_inc(mce_poll_count);

        mce_gather_info(&m, NULL);

        for (i = 0; i < banks; i++) {
                if (!mce_banks[i].ctl || !test_bit(i, *b))
                        continue;

                m.misc = 0;
                m.addr = 0;
                m.bank = i;
                m.tsc = 0;

                barrier();
                m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
                if (!(m.status & MCI_STATUS_VAL))
                        continue;

                /*
                 * Uncorrected or signalled events are handled by the exception
                 * handler when it is enabled, so don't process those here.
                 *
                 * TBD do the same check for MCI_STATUS_EN here?
                 */
                if (!(flags & MCP_UC) &&
                    (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
                        continue;

                if (m.status & MCI_STATUS_MISCV)
                        m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
                if (m.status & MCI_STATUS_ADDRV)
                        m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

                if (!(flags & MCP_TIMESTAMP))
                        m.tsc = 0;
                /*
                 * Don't get the IP here because it's unlikely to
                 * have anything to do with the actual error location.
                 */
                if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce)
                        mce_log(&m);

                /*
                 * Clear state for this bank.
                 */
                mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
        }

        /*
         * Don't clear MCG_STATUS here because it's only defined for
         * exceptions.
         */

        sync_core();
}
EXPORT_SYMBOL_GPL(machine_check_poll);
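
/*
 * machine_check_poll() is called from the periodic timer below
 * (mce_start_timer), from __mcheck_cpu_init_generic() to flush state
 * left over from before reset, and -- on CPUs with CMCI -- from the
 * corrected-error interrupt handler in the Intel-specific code.
 */
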
/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg)
{
        int i;

        for (i = 0; i < banks; i++) {
                m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
                if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
                        return 1;
        }
        return 0;
}
/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until mce_executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines the order of CPUs on entry. The first CPU becomes the Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
        /*
         * The others already did panic for some reason.
         * Bail out like in a timeout.
         * rmb() to tell the compiler that system_state
         * might have been modified by someone else.
         */
        rmb();
        if (atomic_read(&mce_paniced))
                wait_for_panic();
        if (!monarch_timeout)
                goto out;
        if ((s64)*t < SPINUNIT) {
                /* CHECKME: Make panic default for 1 too? */
                if (tolerant < 1)
                        mce_panic("Timeout synchronizing machine check over CPUs",
                                  NULL, NULL);
                cpu_missing = 1;
                return 1;
        }
        *t -= SPINUNIT;
out:
        touch_nmi_watchdog();
        return 0;
}
/*
 * The Monarch's reign. The Monarch is the CPU that entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. If any error is
 * fatal, it panics. Only then does it let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable
 * case and also make sure all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
        int cpu;
        struct mce *m = NULL;
        int global_worst = 0;
        char *msg = NULL;
        char *nmsg = NULL;

        /*
         * This CPU is the Monarch and the other CPUs have run
         * through their handlers.
         * Grade the severity of the errors of all the CPUs.
         */
        for_each_possible_cpu(cpu) {
                int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
                                            &nmsg);
                if (severity > global_worst) {
                        msg = nmsg;
                        global_worst = severity;
                        m = &per_cpu(mces_seen, cpu);
                }
        }

        /*
         * Cannot recover? Panic here then.
         * This dumps all the mces in the log buffer and stops the
         * other CPUs.
         */
        if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
                mce_panic("Fatal Machine check", m, msg);

        /*
         * For a UC somewhere we let the CPU that detected it handle it.
         * We must also let the others continue, otherwise the handling
         * CPU could deadlock on a lock.
         */

        /*
         * No machine check event found. Must be some external
         * source or one CPU is hung. Panic.
         */
        if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
                mce_panic("Machine check from unknown source", NULL, NULL);

        /*
         * Now clear all the mces_seen so that they don't reappear on
         * the next mce.
         */
        for_each_possible_cpu(cpu)
                memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}
static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
        int order;
        int cpus = num_online_cpus();
        u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

        if (!timeout)
                return -1;

        atomic_add(*no_way_out, &global_nwo);
        /*
         * global_nwo should be updated before mce_callin
         */
        smp_wmb();
        order = atomic_inc_return(&mce_callin);

        /*
         * Wait for everyone.
         */
        while (atomic_read(&mce_callin) != cpus) {
                if (mce_timed_out(&timeout)) {
                        atomic_set(&global_nwo, 0);
                        return -1;
                }
                ndelay(SPINUNIT);
        }

        /*
         * mce_callin should be read before global_nwo
         */
        smp_rmb();

        if (order == 1) {
                /*
                 * Monarch: Starts executing now, the others wait.
                 */
                atomic_set(&mce_executing, 1);
        } else {
                /*
                 * Subject: Now start the scanning loop one by one in
                 * the original callin order.
                 * This way when there are any shared banks it will be
                 * only seen by one CPU before cleared, avoiding duplicates.
                 */
                while (atomic_read(&mce_executing) < order) {
                        if (mce_timed_out(&timeout)) {
                                atomic_set(&global_nwo, 0);
                                return -1;
                        }
                        ndelay(SPINUNIT);
                }
        }

        /*
         * Cache the global no_way_out state.
         */
        *no_way_out = atomic_read(&global_nwo);

        return order;
}
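
/*
 * Rendezvous sketch for mce_start() above: every CPU atomically
 * increments mce_callin to obtain its "order" ticket and spins until
 * all online CPUs have called in; the CPU with order == 1 becomes the
 * Monarch and sets mce_executing to 1, and each Subject spins until
 * mce_executing reaches its own order, so the per-bank scanning in
 * do_machine_check() runs strictly one CPU at a time in callin order.
 */
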
/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
        int ret = -1;
        u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

        if (!timeout)
                goto reset;
        if (order < 0)
                goto reset;

        /*
         * Allow others to run.
         */
        atomic_inc(&mce_executing);

        if (order == 1) {
                /* CHECKME: Can this race with a parallel hotplug? */
                int cpus = num_online_cpus();

                /*
                 * Monarch: Wait for everyone to go through their scanning
                 * loops.
                 */
                while (atomic_read(&mce_executing) <= cpus) {
                        if (mce_timed_out(&timeout))
                                goto reset;
                        ndelay(SPINUNIT);
                }

                mce_reign();
                barrier();
                ret = 0;
        } else {
                /*
                 * Subject: Wait for Monarch to finish.
                 */
                while (atomic_read(&mce_executing) != 0) {
                        if (mce_timed_out(&timeout))
                                goto reset;
                        ndelay(SPINUNIT);
                }

                /*
                 * Don't reset anything. That's done by the Monarch.
                 */
                return 0;
        }

        /*
         * Reset all global state.
         */
reset:
        atomic_set(&global_nwo, 0);
        atomic_set(&mce_callin, 0);
        barrier();

        /*
         * Let others run again.
         */
        atomic_set(&mce_executing, 0);
        return ret;
}
/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
        if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
                return 0;
        if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
                return 0;
        if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
                return 0;
        return 1;
}
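
/*
 * Example for mce_usable_address() above: for a typical memory error
 * MISCV and ADDRV are set and MISC encodes a physical address with
 * LSB <= PAGE_SHIFT, i.e. the address is precise at least to one page,
 * which is exactly what memory_failure() needs. Anything coarser, or a
 * non-physical address mode, is rejected.
 */
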
static void mce_clear_state(unsigned long *toclear)
{
        int i;

        for (i = 0; i < banks; i++) {
                if (test_bit(i, toclear))
                        mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
        }
}
/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so be always careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
        struct mce m, *final;
        int i;
        int worst = 0;
        int severity;
        /*
         * Establish sequential order between the CPUs entering the machine
         * check handler.
         */
        int order;
        /*
         * If no_way_out gets set, there is no safe way to recover from this
         * MCE. If tolerant is cranked up, we'll try anyway.
         */
        int no_way_out = 0;
        /*
         * If kill_it gets set, there might be a way to recover from this
         * error.
         */
        int kill_it = 0;
        DECLARE_BITMAP(toclear, MAX_NR_BANKS);
        char *msg = "Unknown";

        atomic_inc(&mce_entry);

        percpu_inc(mce_exception_count);

        if (!banks)
                goto out;

        mce_gather_info(&m, regs);

        final = &__get_cpu_var(mces_seen);
        *final = m;

        no_way_out = mce_no_way_out(&m, &msg);

        barrier();

        /*
         * When there is no restart IP we must always kill or panic.
         */
        if (!(m.mcgstatus & MCG_STATUS_RIPV))
                kill_it = 1;

        /*
         * Go through all the banks in exclusion of the other CPUs.
         * This way we don't report duplicated events on shared banks
         * because the first one to see it will clear it.
         */
        order = mce_start(&no_way_out);
        for (i = 0; i < banks; i++) {
                __clear_bit(i, toclear);
                if (!mce_banks[i].ctl)
                        continue;

                m.misc = 0;
                m.addr = 0;
                m.bank = i;

                m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
                if ((m.status & MCI_STATUS_VAL) == 0)
                        continue;

                /*
                 * Errors that are neither uncorrected nor signaled are
                 * handled by machine_check_poll. Leave them alone, unless
                 * this panics.
                 */
                if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
                    !no_way_out)
                        continue;

                /*
                 * Set taint even when machine check was not enabled.
                 */
                add_taint(TAINT_MACHINE_CHECK);

                severity = mce_severity(&m, tolerant, NULL);

                /*
                 * When the machine check was for a corrected error, don't
                 * touch it here (the poll handler owns it), unless we're
                 * panicking.
                 */
                if (severity == MCE_KEEP_SEVERITY && !no_way_out)
                        continue;
                __set_bit(i, toclear);
                if (severity == MCE_NO_SEVERITY) {
                        /*
                         * Machine check event was not enabled. Clear, but
                         * ignore.
                         */
                        continue;
                }

                /*
                 * Kill on action required.
                 */
                if (severity == MCE_AR_SEVERITY)
                        kill_it = 1;

                if (m.status & MCI_STATUS_MISCV)
                        m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
                if (m.status & MCI_STATUS_ADDRV)
                        m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

                /*
                 * Action optional error. Queue address for later processing.
                 * When the ring overflows we just ignore the AO error.
                 * RED-PEN add some logging mechanism when
                 * usable_address or mce_ring_add fails.
                 * RED-PEN don't ignore overflow for tolerant == 0
                 */
                if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
                        mce_ring_add(m.addr >> PAGE_SHIFT);

                mce_log(&m);

                if (severity > worst) {
                        *final = m;
                        worst = severity;
                }
        }

        if (!no_way_out)
                mce_clear_state(toclear);

        /*
         * Do most of the synchronization with other CPUs.
         * When there's any problem use only local no_way_out state.
         */
        if (mce_end(order) < 0)
                no_way_out = worst >= MCE_PANIC_SEVERITY;

        /*
         * If we have decided that we just CAN'T continue, and the user
         * has not set tolerant to an insane level, give up and die.
         *
         * This is mainly used in the case when the system doesn't
         * support MCE broadcasting or it has been disabled.
         */
        if (no_way_out && tolerant < 3)
                mce_panic("Fatal machine check on current CPU", final, msg);

        /*
         * If the error seems to be unrecoverable, something should be
         * done. Try to kill as little as possible. If we can kill just
         * one task, do that. If the user has set the tolerance very
         * high, don't try to do anything at all.
         */

        if (kill_it && tolerant < 3)
                force_sig(SIGBUS, current);

        /* notify userspace ASAP */
        set_thread_flag(TIF_MCE_NOTIFY);

        if (worst > 0)
                mce_report_event(regs);
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
        atomic_dec(&mce_entry);
        sync_core();
}
EXPORT_SYMBOL_GPL(do_machine_check);
/* Dummy to break the dependency; the actual code is in mm/memory-failure.c */
void __attribute__((weak)) memory_failure(unsigned long pfn, int vector)
{
        printk(KERN_ERR "Action optional memory failure at %lx ignored\n", pfn);
}

/*
 * Called after mce notification in process context. This code
 * is allowed to sleep. Call the high level VM handler to process
 * any corrupted pages.
 * Assume that the work queue code only calls this one at a time
 * per CPU.
 * Note we don't disable preemption, so this code might run on the wrong
 * CPU. In this case the event is picked up by the scheduled work queue.
 * This is merely a fast path to expedite processing in some common
 * cases.
 */
void mce_notify_process(void)
{
        unsigned long pfn;

        mce_notify_irq();
        while (mce_ring_get(&pfn))
                memory_failure(pfn, MCE_VECTOR);
}

static void mce_process_work(struct work_struct *dummy)
{
        mce_notify_process();
}
#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) MSR.
 */
void mce_log_therm_throt_event(__u64 status)
{
        struct mce m;

        mce_setup(&m);
        m.bank = MCE_THERMAL_BANK;
        m.status = status;
        mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */
/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static int check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mce_start_timer(unsigned long data)
{
        struct timer_list *t = &per_cpu(mce_timer, data);
        int *n;

        WARN_ON(smp_processor_id() != data);

        if (mce_available(__this_cpu_ptr(&cpu_info))) {
                machine_check_poll(MCP_TIMESTAMP,
                                   &__get_cpu_var(mce_poll_banks));
        }

        /*
         * Alert userspace if needed. If we logged an MCE, reduce the
         * polling interval, otherwise increase the polling interval.
         */
        n = &__get_cpu_var(mce_next_interval);
        if (mce_notify_irq())
                *n = max(*n/2, HZ/100);
        else
                *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));

        t->expires = jiffies + *n;
        add_timer_on(t, smp_processor_id());
}
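
/*
 * Interval adaptation example (HZ == 1000, check_interval == 300 s):
 * a round that logged an event halves the next poll interval, down to
 * a floor of HZ/100 (10 ms); a quiet round doubles it, up to the
 * rounded 300 s ceiling.
 */
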
/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                del_timer_sync(&per_cpu(mce_timer, cpu));
}

static void mce_do_trigger(struct work_struct *work)
{
        call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
        /* Not more than two messages every minute */
        static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

        clear_thread_flag(TIF_MCE_NOTIFY);

        if (test_and_clear_bit(0, &mce_need_notify)) {
                /* wake processes polling /dev/mcelog */
                wake_up_interruptible(&mce_chrdev_wait);

                /*
                 * There is no risk of missing notifications because
                 * work_pending is always cleared before the function is
                 * executed.
                 */
                if (mce_helper[0] && !work_pending(&mce_trigger_work))
                        schedule_work(&mce_trigger_work);

                if (__ratelimit(&ratelimit))
                        pr_info(HW_ERR "Machine check events logged\n");

                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __cpuinit __mcheck_cpu_mce_banks_init(void)
{
        int i;

        mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
        if (!mce_banks)
                return -ENOMEM;
        for (i = 0; i < banks; i++) {
                struct mce_bank *b = &mce_banks[i];

                b->ctl = -1ULL;
                b->init = 1;
        }
        return 0;
}
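
/*
 * In __mcheck_cpu_mce_banks_init() above, ctl == -1ULL sets every
 * enable bit in MCi_CTL, i.e. report all error types the bank
 * implements; ->init marks banks we may program at all (see the
 * Intel bank 0 quirk in __mcheck_cpu_apply_quirks() below).
 */
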
/*
 * Initialize Machine Checks for a CPU.
 */
static int __cpuinit __mcheck_cpu_cap_init(void)
{
        unsigned b;
        u64 cap;

        rdmsrl(MSR_IA32_MCG_CAP, cap);

        b = cap & MCG_BANKCNT_MASK;
        if (!banks)
                printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);

        if (b > MAX_NR_BANKS) {
                printk(KERN_WARNING
                       "MCE: Using only %u machine check banks out of %u\n",
                       MAX_NR_BANKS, b);
                b = MAX_NR_BANKS;
        }

        /* Don't support asymmetric configurations today */
        WARN_ON(banks != 0 && b != banks);
        banks = b;
        if (!mce_banks) {
                int err = __mcheck_cpu_mce_banks_init();

                if (err)
                        return err;
        }

        /* Use accurate RIP reporting if available. */
        if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
                rip_msr = MSR_IA32_MCG_EIP;

        if (cap & MCG_SER_P)
                mce_ser = 1;

        return 0;
}
static void __mcheck_cpu_init_generic(void)
{
        mce_banks_t all_banks;
        u64 cap;
        int i;

        /*
         * Log the machine checks left over from the previous reset.
         */
        bitmap_fill(all_banks, MAX_NR_BANKS);
        machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);

        set_in_cr4(X86_CR4_MCE);

        rdmsrl(MSR_IA32_MCG_CAP, cap);
        if (cap & MCG_CTL_P)
                wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

        for (i = 0; i < banks; i++) {
                struct mce_bank *b = &mce_banks[i];

                if (!b->init)
                        continue;
                wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
                wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
        }
}
/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
        if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
                pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
                return -EOPNOTSUPP;
        }

        /* This should be disabled by the BIOS, but isn't always */
        if (c->x86_vendor == X86_VENDOR_AMD) {
                if (c->x86 == 15 && banks > 4) {
                        /*
                         * disable GART TBL walk error reporting, which
                         * trips off incorrectly with the IOMMU & 3ware
                         * & Cerberus:
                         */
                        clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
                }
                if (c->x86 <= 17 && mce_bootlog < 0) {
                        /*
                         * Lots of broken BIOSes around that don't clear them
                         * by default and leave crap in there. Don't log:
                         */
                        mce_bootlog = 0;
                }
                /*
                 * Various K7s with broken bank 0 around. Always disable
                 * by default.
                 */
                if (c->x86 == 6 && banks > 0)
                        mce_banks[0].ctl = 0;
        }

        if (c->x86_vendor == X86_VENDOR_INTEL) {
                /*
                 * SDM documents that on family 6 bank 0 should not be written
                 * because it aliases to another special BIOS controlled
                 * register.
                 * But it's not aliased anymore on model 0x1a+
                 * Don't ignore bank 0 completely because there could be a
                 * valid event later, merely don't write CTL0.
                 */
                if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
                        mce_banks[0].init = 0;

                /*
                 * All newer Intel systems support MCE broadcasting. Enable
                 * synchronization with a one second timeout.
                 */
                if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
                    monarch_timeout < 0)
                        monarch_timeout = USEC_PER_SEC;

                /*
                 * There are also broken BIOSes on some Pentium M and
                 * earlier systems:
                 */
                if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
                        mce_bootlog = 0;
        }
        if (monarch_timeout < 0)
                monarch_timeout = 0;
        if (mce_bootlog != 0)
                mce_panic_timeout = 30;

        return 0;
}
static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
        if (c->x86 != 5)
                return 0;

        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                intel_p5_mcheck_init(c);
                return 1;
        case X86_VENDOR_CENTAUR:
                winchip_mcheck_init(c);
                return 1;
        }

        return 0;
}
static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                mce_intel_feature_init(c);
                break;
        case X86_VENDOR_AMD:
                mce_amd_feature_init(c);
                break;
        default:
                break;
        }
}

static void __mcheck_cpu_init_timer(void)
{
        struct timer_list *t = &__get_cpu_var(mce_timer);
        int *n = &__get_cpu_var(mce_next_interval);

        setup_timer(t, mce_start_timer, smp_processor_id());

        if (mce_ignore_ce)
                return;

        *n = check_interval * HZ;
        if (!*n)
                return;
        t->expires = round_jiffies(jiffies + *n);
        add_timer_on(t, smp_processor_id());
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
        printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
               smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
                                                unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
{
        if (mce_disabled)
                return;

        if (__mcheck_cpu_ancient_init(c))
                return;

        if (!mce_available(c))
                return;

        if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
                mce_disabled = 1;
                return;
        }

        machine_check_vector = do_machine_check;

        __mcheck_cpu_init_generic();
        __mcheck_cpu_init_vendor(c);
        __mcheck_cpu_init_timer();
        INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
        init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
}
/*
 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_chrdev_state_lock);
static int mce_chrdev_open_count;       /* #times opened */
static int mce_chrdev_open_exclu;       /* already open exclusive? */

static int mce_chrdev_open(struct inode *inode, struct file *file)
{
        spin_lock(&mce_chrdev_state_lock);

        if (mce_chrdev_open_exclu ||
            (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
                spin_unlock(&mce_chrdev_state_lock);

                return -EBUSY;
        }

        if (file->f_flags & O_EXCL)
                mce_chrdev_open_exclu = 1;
        mce_chrdev_open_count++;

        spin_unlock(&mce_chrdev_state_lock);

        return nonseekable_open(inode, file);
}

static int mce_chrdev_release(struct inode *inode, struct file *file)
{
        spin_lock(&mce_chrdev_state_lock);

        mce_chrdev_open_count--;
        mce_chrdev_open_exclu = 0;

        spin_unlock(&mce_chrdev_state_lock);

        return 0;
}

static void collect_tscs(void *data)
{
        unsigned long *cpu_tsc = (unsigned long *)data;

        rdtscll(cpu_tsc[smp_processor_id()]);
}
static int mce_apei_read_done;

/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
        int rc;
        u64 record_id;
        struct mce m;

        if (usize < sizeof(struct mce))
                return -EINVAL;

        rc = apei_read_mce(&m, &record_id);
        /* Error or no more MCE record */
        if (rc <= 0) {
                mce_apei_read_done = 1;
                return rc;
        }
        rc = -EFAULT;
        if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
                return rc;
        /*
         * Ideally we should have cleared the record after it has been
         * flushed to disk or sent over the network by /sbin/mcelog, but
         * we have no interface to support that now, so just clear it to
         * avoid duplication.
         */
        rc = apei_clear_mce(record_id);
        if (rc) {
                mce_apei_read_done = 1;
                return rc;
        }
        *ubuf += sizeof(struct mce);

        return 0;
}
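
/*
 * Read protocol for mce_chrdev_read() below: first drain any records
 * persisted across reboots via APEI, then copy out finished in-memory
 * records and reset mcelog.next with cmpxchg(); finally, after
 * synchronize_sched() and a per-CPU TSC snapshot, make a second pass
 * to pick up records that were still being written during the first.
 */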
static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
                               size_t usize, loff_t *off)
{
        char __user *buf = ubuf;
        unsigned long *cpu_tsc;
        unsigned prev, next;
        int i, err;

        cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
        if (!cpu_tsc)
                return -ENOMEM;

        mutex_lock(&mce_chrdev_read_mutex);

        if (!mce_apei_read_done) {
                err = __mce_read_apei(&buf, usize);
                if (err || buf != ubuf)
                        goto out;
        }

        next = rcu_dereference_check_mce(mcelog.next);

        /* Only supports full reads right now */
        err = -EINVAL;
        if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
                goto out;

        err = 0;
        prev = 0;
        do {
                for (i = prev; i < next; i++) {
                        unsigned long start = jiffies;
                        struct mce *m = &mcelog.entry[i];

                        while (!m->finished) {
                                if (time_after_eq(jiffies, start + 2)) {
                                        memset(m, 0, sizeof(*m));
                                        goto timeout;
                                }
                                cpu_relax();
                        }
                        smp_rmb();
                        err |= copy_to_user(buf, m, sizeof(*m));
                        buf += sizeof(*m);
timeout:
                        ;
                }

                memset(mcelog.entry + prev, 0,
                       (next - prev) * sizeof(struct mce));
                prev = next;
                next = cmpxchg(&mcelog.next, prev, 0);
        } while (next != prev);

        synchronize_sched();

        /*
         * Collect entries that were still getting written before the
         * synchronize.
         */
        on_each_cpu(collect_tscs, cpu_tsc, 1);

        for (i = next; i < MCE_LOG_LEN; i++) {
                struct mce *m = &mcelog.entry[i];

                if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
                        err |= copy_to_user(buf, m, sizeof(*m));
                        smp_rmb();
                        buf += sizeof(*m);
                        memset(m, 0, sizeof(*m));
                }
        }

        if (err)
                err = -EFAULT;

out:
        mutex_unlock(&mce_chrdev_read_mutex);
        kfree(cpu_tsc);

        return err ? err : buf - ubuf;
}
static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
        poll_wait(file, &mce_chrdev_wait, wait);
        if (rcu_access_index(mcelog.next))
                return POLLIN | POLLRDNORM;
        if (!mce_apei_read_done && apei_check_mce())
                return POLLIN | POLLRDNORM;
        return 0;
}

static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
                             unsigned long arg)
{
        int __user *p = (int __user *)arg;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd) {
        case MCE_GET_RECORD_LEN:
                return put_user(sizeof(struct mce), p);
        case MCE_GET_LOG_LEN:
                return put_user(MCE_LOG_LEN, p);
        case MCE_GETCLEAR_FLAGS: {
                unsigned flags;

                do {
                        flags = mcelog.flags;
                } while (cmpxchg(&mcelog.flags, flags, 0) != flags);

                return put_user(flags, p);
        }
        default:
                return -ENOTTY;
        }
}

static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
                            size_t usize, loff_t *off);

void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
                                 const char __user *ubuf,
                                 size_t usize, loff_t *off))
{
        mce_write = fn;
}
EXPORT_SYMBOL_GPL(register_mce_write_callback);

ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
                         size_t usize, loff_t *off)
{
        if (mce_write)
                return mce_write(filp, ubuf, usize, off);
        else
                return -EINVAL;
}

static const struct file_operations mce_chrdev_ops = {
        .open                   = mce_chrdev_open,
        .release                = mce_chrdev_release,
        .read                   = mce_chrdev_read,
        .write                  = mce_chrdev_write,
        .poll                   = mce_chrdev_poll,
        .unlocked_ioctl         = mce_chrdev_ioctl,
        .llseek                 = no_llseek,
};

static struct miscdevice mce_chrdev_device = {
        MISC_MCELOG_MINOR,
        "mcelog",
        &mce_chrdev_ops,
};
/*
 * mce=off          Disables machine check
 * mce=no_cmci      Disables CMCI
 * mce=dont_log_ce  Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce    Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *      monarchtimeout is how long to wait for other CPUs on machine
 *      check, or 0 to not wait
 * mce=bootlog      Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog    Don't log MCEs from before booting.
 */
static int __init mcheck_enable(char *str)
{
        if (*str == 0) {
                enable_p5_mce();
                return 1;
        }
        if (*str == '=')
                str++;
        if (!strcmp(str, "off"))
                mce_disabled = 1;
        else if (!strcmp(str, "no_cmci"))
                mce_cmci_disabled = 1;
        else if (!strcmp(str, "dont_log_ce"))
                mce_dont_log_ce = 1;
        else if (!strcmp(str, "ignore_ce"))
                mce_ignore_ce = 1;
        else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
                mce_bootlog = (str[0] == 'b');
        else if (isdigit(str[0])) {
                get_option(&str, &tolerant);
                if (*str == ',') {
                        ++str;
                        get_option(&str, &monarch_timeout);
                }
        } else {
                printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
                       str);
                return 0;
        }
        return 1;
}
__setup("mce", mcheck_enable);
int __init mcheck_init(void)
{
        mcheck_intel_therm_init();

        return 0;
}

/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable_error_reporting(void)
{
        int i;

        for (i = 0; i < banks; i++) {
                struct mce_bank *b = &mce_banks[i];

                if (b->init)
                        wrmsrl(MSR_IA32_MCx_CTL(i), 0);
        }
        return 0;
}

static int mce_syscore_suspend(void)
{
        return mce_disable_error_reporting();
}

static void mce_syscore_shutdown(void)
{
        mce_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
        __mcheck_cpu_init_generic();
        __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
}

static struct syscore_ops mce_syscore_ops = {
        .suspend        = mce_syscore_suspend,
        .shutdown       = mce_syscore_shutdown,
        .resume         = mce_syscore_resume,
};

/*
 * mce_sysdev: Sysfs support
 */

static void mce_cpu_restart(void *data)
{
        if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;
        __mcheck_cpu_init_generic();
        __mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
        mce_timer_delete_all();
        on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
        if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;
        cmci_clear();
}

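/*
 * Re-enable corrected-error handling on this CPU: restore CMCI and
 * recheck the banks; if @all is non-NULL, the per-CPU polling timer is
 * restarted as well.
 */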
static void mce_enable_ce(void *all)
{
        if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;
        cmci_reenable();
        cmci_recheck();
        if (all)
                __mcheck_cpu_init_timer();
}

static struct sysdev_class mce_sysdev_class = {
        .name           = "machinecheck",
};

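/*
 * One sysdev per CPU.  With the "machinecheck" class above, these
 * should appear as /sys/devices/system/machinecheck/machinecheck<N>
 * (assuming the standard sysdev layout of this kernel generation).
 */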
DEFINE_PER_CPU(struct sys_device, mce_sysdev);

__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

static inline struct mce_bank *attr_to_bank(struct sysdev_attribute *attr)
{
        return container_of(attr, struct mce_bank, attr);
}

static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
                         char *buf)
{
        return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
                        const char *buf, size_t size)
{
        u64 new;

        if (strict_strtoull(buf, 0, &new) < 0)
                return -EINVAL;

        attr_to_bank(attr)->ctl = new;
        mce_restart();

        return size;
}

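/*
 * The "trigger" attribute holds the path of a userspace helper program
 * (mce_helper) that is run when an MCE is logged.  Illustratively (the
 * path and exact sysfs location are examples, not fixed by this file):
 *
 *	echo /usr/sbin/mce-helper > \
 *		/sys/devices/system/machinecheck/machinecheck0/trigger
 */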
static ssize_t
show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
{
        strcpy(buf, mce_helper);
        strcat(buf, "\n");
        return strlen(mce_helper) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
                                const char *buf, size_t siz)
{
        char *p;

        strncpy(mce_helper, buf, sizeof(mce_helper));
        mce_helper[sizeof(mce_helper)-1] = 0;
        p = strchr(mce_helper, '\n');

        if (p)
                *p = 0;

        return strlen(mce_helper) + !!p;
}

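/*
 * sysfs: flip mce_ignore_ce.  Enabling it stops the polling timers and
 * clears CMCI on every CPU; disabling it re-arms both.  The ^ !!new
 * test makes this a no-op when the value does not actually change.
 */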
static ssize_t set_ignore_ce(struct sys_device *s,
                             struct sysdev_attribute *attr,
                             const char *buf, size_t size)
{
        u64 new;

        if (strict_strtoull(buf, 0, &new) < 0)
                return -EINVAL;

        if (mce_ignore_ce ^ !!new) {
                if (new) {
                        /* disable ce features */
                        mce_timer_delete_all();
                        on_each_cpu(mce_disable_cmci, NULL, 1);
                        mce_ignore_ce = 1;
                } else {
                        /* enable ce features */
                        mce_ignore_ce = 0;
                        on_each_cpu(mce_enable_ce, (void *)1, 1);
                }
        }
        return size;
}

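/*
 * sysfs: flip mce_cmci_disabled.  Unlike ignore_ce this only toggles
 * CMCI; the polling timers are left alone (mce_enable_ce is called
 * with NULL, so its timer branch is skipped).
 */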
static ssize_t set_cmci_disabled(struct sys_device *s,
                                 struct sysdev_attribute *attr,
                                 const char *buf, size_t size)
{
        u64 new;

        if (strict_strtoull(buf, 0, &new) < 0)
                return -EINVAL;

        if (mce_cmci_disabled ^ !!new) {
                if (new) {
                        /* disable cmci */
                        on_each_cpu(mce_disable_cmci, NULL, 1);
                        mce_cmci_disabled = 1;
                } else {
                        /* enable cmci */
                        mce_cmci_disabled = 0;
                        on_each_cpu(mce_enable_ce, NULL, 1);
                }
        }
        return size;
}

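/*
 * Store an integer attribute (e.g. check_interval) and then restart
 * machine-check processing on all CPUs so the new value takes effect.
 */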
static ssize_t store_int_with_restart(struct sys_device *s,
                                      struct sysdev_attribute *attr,
                                      const char *buf, size_t size)
{
        ssize_t ret = sysdev_store_int(s, attr, buf, size);

        mce_restart();
        return ret;
}

static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
static SYSDEV_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);

static struct sysdev_ext_attribute attr_check_interval = {
        _SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
                     store_int_with_restart),
        &check_interval
};

static struct sysdev_ext_attribute attr_ignore_ce = {
        _SYSDEV_ATTR(ignore_ce, 0644, sysdev_show_int, set_ignore_ce),
        &mce_ignore_ce
};

static struct sysdev_ext_attribute attr_cmci_disabled = {
        _SYSDEV_ATTR(cmci_disabled, 0644, sysdev_show_int, set_cmci_disabled),
        &mce_cmci_disabled
};

static struct sysdev_attribute *mce_sysdev_attrs[] = {
        &attr_tolerant.attr,
        &attr_check_interval.attr,
        &attr_trigger,
        &attr_monarch_timeout.attr,
        &attr_dont_log_ce.attr,
        &attr_ignore_ce.attr,
        &attr_cmci_disabled.attr,
        NULL
};

static cpumask_var_t mce_sysdev_initialized;

/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_sysdev_create(unsigned int cpu)
{
        struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
        int err;
        int i, j;

        if (!mce_available(&boot_cpu_data))
                return -EIO;

        memset(&sysdev->kobj, 0, sizeof(struct kobject));
        sysdev->id  = cpu;
        sysdev->cls = &mce_sysdev_class;

        err = sysdev_register(sysdev);
        if (err)
                return err;

        for (i = 0; mce_sysdev_attrs[i]; i++) {
                err = sysdev_create_file(sysdev, mce_sysdev_attrs[i]);
                if (err)
                        goto error;
        }
        for (j = 0; j < banks; j++) {
                err = sysdev_create_file(sysdev, &mce_banks[j].attr);
                if (err)
                        goto error2;
        }
        cpumask_set_cpu(cpu, mce_sysdev_initialized);

        return 0;
error2:
        while (--j >= 0)
                sysdev_remove_file(sysdev, &mce_banks[j].attr);
error:
        while (--i >= 0)
                sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);

        sysdev_unregister(sysdev);

        return err;
}

static __cpuinit void mce_sysdev_remove(unsigned int cpu)
{
        struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
        int i;

        if (!cpumask_test_cpu(cpu, mce_sysdev_initialized))
                return;

        for (i = 0; mce_sysdev_attrs[i]; i++)
                sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);

        for (i = 0; i < banks; i++)
                sysdev_remove_file(sysdev, &mce_banks[i].attr);

        sysdev_unregister(sysdev);
        cpumask_clear_cpu(cpu, mce_sysdev_initialized);
}

/* Make sure there are no machine checks on offlined CPUs. */
static void __cpuinit mce_disable_cpu(void *h)
{
        unsigned long action = *(unsigned long *)h;
        int i;

        if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;

        if (!(action & CPU_TASKS_FROZEN))
                cmci_clear();
        for (i = 0; i < banks; i++) {
                struct mce_bank *b = &mce_banks[i];

                if (b->init)
                        wrmsrl(MSR_IA32_MCx_CTL(i), 0);
        }
}

static void __cpuinit mce_reenable_cpu(void *h)
{
        unsigned long action = *(unsigned long *)h;
        int i;

        if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;

        if (!(action & CPU_TASKS_FROZEN))
                cmci_reenable();
        for (i = 0; i < banks; i++) {
                struct mce_bank *b = &mce_banks[i];

                if (b->init)
                        wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
        }
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct timer_list *t = &per_cpu(mce_timer, cpu);

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                mce_sysdev_create(cpu);
                if (threshold_cpu_callback)
                        threshold_cpu_callback(action, cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                if (threshold_cpu_callback)
                        threshold_cpu_callback(action, cpu);
                mce_sysdev_remove(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                del_timer_sync(t);
                smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                if (!mce_ignore_ce && check_interval) {
                        t->expires = round_jiffies(jiffies +
                                        __get_cpu_var(mce_next_interval));
                        add_timer_on(t, cpu);
                }
                smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
                break;
        case CPU_POST_DEAD:
                /* intentionally ignoring frozen here */
                cmci_rediscover(cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
        .notifier_call = mce_cpu_callback,
};

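/*
 * Build the per-bank sysfs attributes ("bank0" ... "bank<n-1>"), each
 * exposing that bank's control register value via show_bank/set_bank.
 */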
static __init void mce_init_banks(void)
{
        int i;

        for (i = 0; i < banks; i++) {
                struct mce_bank *b = &mce_banks[i];
                struct sysdev_attribute *a = &b->attr;

                sysfs_attr_init(&a->attr);
                a->attr.name    = b->attrname;
                snprintf(b->attrname, ATTR_LEN, "bank%d", i);

                a->attr.mode    = 0644;
                a->show         = show_bank;
                a->store        = set_bank;
        }
}

static __init int mcheck_init_device(void)
{
        int err;
        int i = 0;

        if (!mce_available(&boot_cpu_data))
                return -EIO;

        zalloc_cpumask_var(&mce_sysdev_initialized, GFP_KERNEL);

        mce_init_banks();

        err = sysdev_class_register(&mce_sysdev_class);
        if (err)
                return err;

        for_each_online_cpu(i) {
                err = mce_sysdev_create(i);
                if (err)
                        return err;
        }

        register_syscore_ops(&mce_syscore_ops);
        register_hotcpu_notifier(&mce_cpu_notifier);

        /* register character device /dev/mcelog */
        misc_register(&mce_chrdev_device);

        return err;
}
device_initcall(mcheck_init_device);

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
        mce_disabled = 1;
        return 1;
}
__setup("nomce", mcheck_disable);

#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
        static struct dentry *dmce;

        if (!dmce)
                dmce = debugfs_create_dir("mce", NULL);

        return dmce;
}

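/*
 * Reset the global rendezvous state used by the MCE handler so that
 * the fake_panic test hook below can be exercised repeatedly without
 * leftover counts from a previous run.
 */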
static void mce_reset(void)
{
        cpu_missing = 0;
        atomic_set(&mce_fake_paniced, 0);
        atomic_set(&mce_executing, 0);
        atomic_set(&mce_callin, 0);
        atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
        *val = fake_panic;
        return 0;
}

static int fake_panic_set(void *data, u64 val)
{
        mce_reset();
        fake_panic = val;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
                        fake_panic_set, "%llu\n");

static int __init mcheck_debugfs_init(void)
{
        struct dentry *dmce, *ffake_panic;

        dmce = mce_get_debugfs_dir();
        if (!dmce)
                return -ENOMEM;
        ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
                                          &fake_panic_fops);
        if (!ffake_panic)
                return -ENOMEM;

        return 0;
}
late_initcall(mcheck_debugfs_init);
#endif