mce.c
/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);
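/*
 * mcelog.next is read both with mce_chrdev_read_mutex held and from
 * sched-RCU read-side sections; the checked dereference below documents
 * both legal contexts for lockdep.
 */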
#define rcu_dereference_check_mce(p) \
	rcu_dereference_index_check((p), \
			      rcu_read_lock_sched_held() || \
			      lockdep_is_held(&mce_chrdev_read_mutex))

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

int mce_disabled __read_mostly;

#define MISC_MCELOG_MINOR	227

#define SPINUNIT 100	/* 100ns */

atomic_t mce_entry;

DEFINE_PER_CPU(unsigned, mce_exception_count);

/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant		__read_mostly = 1;
static int banks		__read_mostly;
static int rip_msr		__read_mostly;
static int mce_bootlog		__read_mostly = -1;
static int monarch_timeout	__read_mostly = -1;
static int mce_panic_timeout	__read_mostly;
static int mce_dont_log_ce	__read_mostly;
int mce_cmci_disabled		__read_mostly;
int mce_ignore_ce		__read_mostly;
int mce_ser			__read_mostly;

struct mce_bank *mce_banks	__read_mostly;

/* User mode helper program triggered by machine check event */
static unsigned long mce_need_notify;
static char mce_helper[128];
static char *mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);

/* MCA banks polled by the periodic polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static DEFINE_PER_CPU(struct work_struct, mce_work);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	rdtscll(m->tsc);
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
#ifdef CONFIG_SMP
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
#endif
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */
static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;
	int ret = 0;

	/* Emit the trace record: */
	trace_mce_record(mce);

	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
	if (ret == NOTIFY_STOP)
		return;

	mce->finished = 0;
	wmb();
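	/*
	 * Reserve a slot in mcelog without taking any lock: find the
	 * first entry that is not yet marked finished, then try to claim
	 * it by advancing mcelog.next with cmpxchg. A racing writer that
	 * loses the cmpxchg simply retries the outer loop.
	 */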
	for (;;) {
		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {
			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}

static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
		 m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			 m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		 cpu_data(m->extcpu).microcode);

	/*
	 * Print out human-readable details about the MCE error,
	 * (if the CPU has an implementation for that)
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_paniced;

static int fake_panic;
static atomic_t mce_fake_paniced;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT * USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mce_panic_timeout;
	panic("Panicking machine check CPU died");
}

static void mce_panic(char *msg, struct mce *final, char *exp)
{
	int i, apei_err = 0;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_paniced) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_paniced) > 1)
			return;
	}
	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];
		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce))) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mce_panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MCx_STATUS(bank))
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MCx_ADDR(bank))
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MCx_MISC(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;
		}
		/* Use accurate RIP reporting if available. */
		if (rip_msr)
			m->ip = mce_rdmsrl(rip_msr);
	}
}

/*
 * Simple lockless ring to communicate PFNs from the exception handler with the
 * process context work function. This is vastly simplified because there's
 * only a single reader and a single writer.
 */
#define MCE_RING_SIZE 16	/* we use one entry less */

struct mce_ring {
	unsigned short start;
	unsigned short end;
	unsigned long ring[MCE_RING_SIZE];
};

static DEFINE_PER_CPU(struct mce_ring, mce_ring);

/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);

	return r->start == r->end;
}

static int mce_ring_get(unsigned long *pfn)
{
	struct mce_ring *r;
	int ret = 0;

	*pfn = 0;
	get_cpu();
	r = &__get_cpu_var(mce_ring);
	if (r->start == r->end)
		goto out;
	*pfn = r->ring[r->start];
	r->start = (r->start + 1) % MCE_RING_SIZE;
	ret = 1;
out:
	put_cpu();
	return ret;
}

/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);
	unsigned next;
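	/*
	 * The ring deliberately keeps one slot unused (see MCE_RING_SIZE
	 * above): if advancing 'end' would make it equal to 'start' the
	 * ring is treated as full and the PFN is dropped, which lets the
	 * single reader distinguish full from empty without a counter.
	 */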
	next = (r->end + 1) % MCE_RING_SIZE;
	if (next == r->start)
		return -1;
	r->ring[r->end] = pfn;
	wmb();
	r->end = next;
	return 0;
}

int mce_available(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_ring_empty()) {
		struct work_struct *work = &__get_cpu_var(mce_work);

		if (!work_pending(work))
			schedule_work(work);
	}
}

DEFINE_PER_CPU(struct irq_work, mce_irq_work);

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&__get_cpu_var(mce_irq_work));
}

/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mce_ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);
			m->addr >>= shift;
			m->addr <<= shift;
		}
	}
}

DEFINE_PER_CPU(unsigned, mce_poll_count);
/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between exception handler
 * and poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In that case it's likely it will
 * not fully execute the machine check handler either.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce m;
	int i;

	percpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	for (i = 0; i < banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		mce_read_aux(&m, i);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce)
			mce_log(&m);

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg)
{
	int i;

	for (i = 0; i < banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
			return 1;
	}
	return 0;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until mce_executing equals its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_paniced))
		wait_for_panic();
	if (!monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		/* CHECKME: Make panic default for 1 too? */
		if (tolerant < 1)
			mce_panic("Timeout synchronizing machine check over CPUs",
				  NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}
/*
 * The Monarch's reign. The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. When any
 * error is fatal, panic. Only then let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPUs). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
					    &nmsg);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
		mce_panic("Fatal Machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * We must also let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
		mce_panic("Machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * global_nwo should be updated before mce_callin
	 */
	smp_wmb();
	order = atomic_inc_return(&mce_callin);
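	/*
	 * atomic_inc_return() makes 'order' 1-based: the first CPU to
	 * call in gets order == 1 and becomes the Monarch below.
	 */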
	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout)) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout)) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}
/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;
	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;
	return 1;
}

static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * Need to save faulting physical address associated with a process
 * in the machine check handler some place where we can grab it back
 * later in mce_notify_process()
 */
#define MCE_INFO_MAX	16

struct mce_info {
	atomic_t		inuse;
	struct task_struct	*t;
	__u64			paddr;
} mce_info[MCE_INFO_MAX];
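/*
 * Slots are claimed with atomic_cmpxchg() rather than a lock because,
 * per the comment above, the save happens in the machine check handler,
 * where taking a lock is not an option.
 */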
static void mce_save_info(__u64 addr)
{
	struct mce_info *mi;

	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) {
		if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
			mi->t = current;
			mi->paddr = addr;
			return;
		}
	}

	mce_panic("Too many concurrent recoverable errors", NULL, NULL);
}

static struct mce_info *mce_find_info(void)
{
	struct mce_info *mi;

	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++)
		if (atomic_read(&mi->inuse) && mi->t == current)
			return mi;
	return NULL;
}

static void mce_clear_info(struct mce_info *mi)
{
	atomic_set(&mi->inuse, 0);
}
/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so always be careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE. If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
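	/*
	 * 'toclear' collects the banks whose status this CPU will clear
	 * at the end of the scan; clearing is skipped when no_way_out is
	 * set so the fatal state survives for mce_panic().
	 */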
	char *msg = "Unknown";

	atomic_inc(&mce_entry);

	percpu_inc(mce_exception_count);

	if (!banks)
		goto out;

	mce_gather_info(&m, regs);

	final = &__get_cpu_var(mces_seen);
	*final = m;

	no_way_out = mce_no_way_out(&m, &msg);

	barrier();
	/*
	 * When there is no restart IP we must always kill or panic.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Go through all the banks in exclusion of the other CPUs.
	 * This way we don't report duplicated events on shared banks
	 * because the first one to see it will clear it.
	 */
	order = mce_start(&no_way_out);
	for (i = 0; i < banks; i++) {
		__clear_bit(i, toclear);
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Non uncorrected or non signaled errors are handled by
		 * machine_check_poll. Leave them alone, unless this panics.
		 */
		if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
		    !no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK);

		severity = mce_severity(&m, tolerant, NULL);
		/*
		 * When the machine check was for the corrected handler,
		 * don't touch it, unless we're panicking.
		 */
		if (severity == MCE_KEEP_SEVERITY && !no_way_out)
			continue;
		__set_bit(i, toclear);
		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		/*
		 * Kill on action required.
		 */
		if (severity == MCE_AR_SEVERITY)
			kill_it = 1;

		mce_read_aux(&m, i);
		/*
		 * Action optional error. Queue address for later processing.
		 * When the ring overflows we just ignore the AO error.
		 * RED-PEN add some logging mechanism when
		 * mce_usable_address or mce_ring_add fails.
		 * RED-PEN don't ignore overflow for tolerant == 0
		 */
		if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
			mce_ring_add(m.addr >> PAGE_SHIFT);

		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (mce_end(order) < 0)
		no_way_out = worst >= MCE_PANIC_SEVERITY;

	/*
	 * If we have decided that we just CAN'T continue, and the user
	 * has not set tolerant to an insane level, give up and die.
	 *
	 * This is mainly used in the case when the system doesn't
	 * support MCE broadcasting or it has been disabled.
	 */
	if (no_way_out && tolerant < 3)
		mce_panic("Fatal machine check on current CPU", final, msg);

	/*
	 * If the error seems to be unrecoverable, something should be
	 * done. Try to kill as little as possible. If we can kill just
	 * one task, do that. If the user has set the tolerance very
	 * high, don't try to do anything at all.
	 */
	if (kill_it && tolerant < 3)
		force_sig(SIGBUS, current);

	/* notify userspace ASAP */
	set_thread_flag(TIF_MCE_NOTIFY);

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	atomic_dec(&mce_entry);
	sync_core();
}
EXPORT_SYMBOL_GPL(do_machine_check);

#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int vector, int flags)
{
	printk(KERN_ERR "Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);
	return 0;
}
#endif

/*
 * Called after mce notification in process context. This code
 * is allowed to sleep. Call the high level VM handler to process
 * any corrupted pages.
 * Assume that the work queue code only calls this one at a time
 * per CPU.
 * Note we don't disable preemption, so this code might run on the wrong
 * CPU. In this case the event is picked up by the scheduled work queue.
 * This is merely a fast path to expedite processing in some common
 * cases.
 */
void mce_notify_process(void)
{
	unsigned long pfn;

	mce_notify_irq();
	while (mce_ring_get(&pfn))
		memory_failure(pfn, MCE_VECTOR, 0);
}

static void mce_process_work(struct work_struct *dummy)
{
	mce_notify_process();
}

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors. If the
 * poller finds an MCE, poll 2x faster. When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static int check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mce_start_timer(unsigned long data)
{
	struct timer_list *t = &per_cpu(mce_timer, data);
	int *n;

	WARN_ON(smp_processor_id() != data);

	if (mce_available(__this_cpu_ptr(&cpu_info))) {
		machine_check_poll(MCP_TIMESTAMP,
				   &__get_cpu_var(mce_poll_banks));
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	n = &__get_cpu_var(mce_next_interval);
	if (mce_notify_irq())
		*n = max(*n/2, HZ/100);
	else
		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));

	t->expires = jiffies + *n;
	add_timer_on(t, smp_processor_id());
}

/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		del_timer_sync(&per_cpu(mce_timer, cpu));
}

static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	clear_thread_flag(TIF_MCE_NOTIFY);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		/* wake processes polling /dev/mcelog */
		wake_up_interruptible(&mce_chrdev_wait);

		/*
		 * There is no risk of missing notifications because
		 * work_pending is always cleared before the function is
		 * executed.
		 */
		if (mce_helper[0] && !work_pending(&mce_trigger_work))
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __cpuinit __mcheck_cpu_mce_banks_init(void)
{
	int i;

	mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __cpuinit __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!banks)
		printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		printk(KERN_WARNING
		       "MCE: Using only %u machine check banks out of %u\n",
		       MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(banks != 0 && b != banks);
	banks = b;
	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mce_ser = 1;

	return 0;
}

static void __mcheck_cpu_init_generic(void)
{
	mce_banks_t all_banks;
	u64 cap;
	int i;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);

	set_in_cr4(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 <= 17 && mce_bootlog < 0) {
			/*
			 * Lots of broken BIOS around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			mce_bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && banks > 0)
			mce_banks[0].ctl = 0;
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+.
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */
		if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
		    monarch_timeout < 0)
			monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
			mce_bootlog = 0;
	}
	if (monarch_timeout < 0)
		monarch_timeout = 0;
	if (mce_bootlog != 0)
		mce_panic_timeout = 30;

	return 0;
}

static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return 0;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		return 1;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		return 1;
	}

	return 0;
}

static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	int *n = &__get_cpu_var(mce_next_interval);

	setup_timer(t, mce_start_timer, smp_processor_id());

	if (mce_ignore_ce)
		return;

	*n = check_interval * HZ;
	if (!*n)
		return;
	t->expires = round_jiffies(jiffies + *n);
	add_timer_on(t, smp_processor_id());
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mce_disabled = 1;
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_timer();
	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
	init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
}

/*
 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
 */

static DEFINE_SPINLOCK(mce_chrdev_state_lock);
static int mce_chrdev_open_count;	/* #times opened */
static int mce_chrdev_open_exclu;	/* already open exclusive? */

static int mce_chrdev_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	if (mce_chrdev_open_exclu ||
	    (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_chrdev_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		mce_chrdev_open_exclu = 1;
	mce_chrdev_open_count++;

	spin_unlock(&mce_chrdev_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_chrdev_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	mce_chrdev_open_count--;
	mce_chrdev_open_exclu = 0;

	spin_unlock(&mce_chrdev_state_lock);

	return 0;
}
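/*
 * Sample the TSC on every CPU. mce_chrdev_read() uses the snapshot to
 * tell which log entries were written before its final pass finished.
 */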
static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}

static int mce_apei_read_done;

/* Collect MCE record of previous boot in persistent storage via APEI ERST. */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
	int rc;
	u64 record_id;
	struct mce m;

	if (usize < sizeof(struct mce))
		return -EINVAL;

	rc = apei_read_mce(&m, &record_id);
	/* Error or no more MCE record */
	if (rc <= 0) {
		mce_apei_read_done = 1;
		return rc;
	}
	rc = -EFAULT;
	if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
		return rc;
	/*
	 * Ideally we should have cleared the record only after it has
	 * been flushed to disk or sent to the network by /sbin/mcelog,
	 * but we have no interface to support that now, so just clear
	 * it to avoid duplication.
	 */
	rc = apei_clear_mce(record_id);
	if (rc) {
		mce_apei_read_done = 1;
		return rc;
	}
	*ubuf += sizeof(struct mce);

	return 0;
}

static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
			       size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_chrdev_read_mutex);

	if (!mce_apei_read_done) {
		err = __mce_read_apei(&buf, usize);
		if (err || buf != ubuf)
			goto out;
	}

	next = rcu_dereference_check_mce(mcelog.next);

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
		goto out;

	err = 0;
	prev = 0;
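	/*
	 * Drain the buffer in passes: copy entries [prev, next), then try
	 * to swing mcelog.next back to 0 with cmpxchg. If new records
	 * arrived meanwhile, the cmpxchg fails and we loop again for the
	 * fresh tail.
	 */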
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			struct mce *m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(m, 0, sizeof(*m));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, m, sizeof(*m));
			buf += sizeof(*m);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
			err |= copy_to_user(buf, m, sizeof(*m));
			smp_rmb();
			buf += sizeof(*m);
			memset(m, 0, sizeof(*m));
		}
	}

	if (err)
		err = -EFAULT;

out:
	mutex_unlock(&mce_chrdev_read_mutex);
	kfree(cpu_tsc);

	return err ? err : buf - ubuf;
}

static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_chrdev_wait, wait);
	if (rcu_access_index(mcelog.next))
		return POLLIN | POLLRDNORM;
	if (!mce_apei_read_done && apei_check_mce())
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
			     unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;
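		/*
		 * Atomically snapshot and clear the flags so concurrent
		 * setters are not lost between the read and the clear.
		 */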
  1431. do {
  1432. flags = mcelog.flags;
  1433. } while (cmpxchg(&mcelog.flags, flags, 0) != flags);
  1434. return put_user(flags, p);
  1435. }
  1436. default:
  1437. return -ENOTTY;
  1438. }
  1439. }
  1440. static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
  1441. size_t usize, loff_t *off);
  1442. void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
  1443. const char __user *ubuf,
  1444. size_t usize, loff_t *off))
  1445. {
  1446. mce_write = fn;
  1447. }
  1448. EXPORT_SYMBOL_GPL(register_mce_write_callback);
  1449. ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
  1450. size_t usize, loff_t *off)
  1451. {
  1452. if (mce_write)
  1453. return mce_write(filp, ubuf, usize, off);
  1454. else
  1455. return -EINVAL;
  1456. }
static const struct file_operations mce_chrdev_ops = {
        .open                   = mce_chrdev_open,
        .release                = mce_chrdev_release,
        .read                   = mce_chrdev_read,
        .write                  = mce_chrdev_write,
        .poll                   = mce_chrdev_poll,
        .unlocked_ioctl         = mce_chrdev_ioctl,
        .llseek                 = no_llseek,
};

static struct miscdevice mce_chrdev_device = {
        MISC_MCELOG_MINOR,
        "mcelog",
        &mce_chrdev_ops,
};
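
/*
 * Illustrative userspace usage of /dev/mcelog (not part of this file;
 * a minimal sketch of what a reader such as mcelog(8) does).  Assumes
 * the MCE_* ioctl constants and struct mce are visible to userspace.
 * Note that mce_chrdev_read() only accepts full-log reads:
 *
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	int recl, logl;
 *	ssize_t n;
 *	void *buf;
 *
 *	ioctl(fd, MCE_GET_RECORD_LEN, &recl);
 *	ioctl(fd, MCE_GET_LOG_LEN, &logl);
 *	buf = malloc(recl * logl);
 *	n = read(fd, buf, recl * logl);    (n is a multiple of recl)
 */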
/*
 * mce=off		Disables machine check
 * mce=no_cmci		Disables CMCI
 * mce=dont_log_ce	Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce	Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog		Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog	Don't log MCEs from before booting.
 */
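/*
 * Examples (illustrative kernel command lines only; see mcheck_enable()
 * below for the exact parsing):
 *
 *	mce=off		turn machine checks off completely
 *	mce=no_cmci	disable CMCI, fall back to polling for corrected errors
 *	mce=2,500	tolerant=2, monarch_timeout=500
 */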
static int __init mcheck_enable(char *str)
{
        if (*str == 0) {
                enable_p5_mce();
                return 1;
        }
        if (*str == '=')
                str++;
        if (!strcmp(str, "off"))
                mce_disabled = 1;
        else if (!strcmp(str, "no_cmci"))
                mce_cmci_disabled = 1;
        else if (!strcmp(str, "dont_log_ce"))
                mce_dont_log_ce = 1;
        else if (!strcmp(str, "ignore_ce"))
                mce_ignore_ce = 1;
        else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
                mce_bootlog = (str[0] == 'b');
        else if (isdigit(str[0])) {
                get_option(&str, &tolerant);
                if (*str == ',') {
                        ++str;
                        get_option(&str, &monarch_timeout);
                }
        } else {
                printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
                       str);
                return 0;
        }
        return 1;
}
__setup("mce", mcheck_enable);
int __init mcheck_init(void)
{
        mcheck_intel_therm_init();

        return 0;
}
/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable_error_reporting(void)
{
        int i;

        for (i = 0; i < banks; i++) {
                struct mce_bank *b = &mce_banks[i];

                if (b->init)
                        wrmsrl(MSR_IA32_MCx_CTL(i), 0);
        }
        return 0;
}

static int mce_syscore_suspend(void)
{
        return mce_disable_error_reporting();
}

static void mce_syscore_shutdown(void)
{
        mce_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
        __mcheck_cpu_init_generic();
        __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
}

static struct syscore_ops mce_syscore_ops = {
        .suspend        = mce_syscore_suspend,
        .shutdown       = mce_syscore_shutdown,
        .resume         = mce_syscore_resume,
};
/*
 * mce_sysdev: Sysfs support
 */

static void mce_cpu_restart(void *data)
{
        if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;
        __mcheck_cpu_init_generic();
        __mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */
static void mce_restart(void)
{
        mce_timer_delete_all();
        on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
        if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;
        cmci_clear();
}

static void mce_enable_ce(void *all)
{
        if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;
        cmci_reenable();
        cmci_recheck();
        if (all)
                __mcheck_cpu_init_timer();
}
static struct sysdev_class mce_sysdev_class = {
        .name           = "machinecheck",
};

DEFINE_PER_CPU(struct sys_device, mce_sysdev);

__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

static inline struct mce_bank *attr_to_bank(struct sysdev_attribute *attr)
{
        return container_of(attr, struct mce_bank, attr);
}
static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
                         char *buf)
{
        return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
                        const char *buf, size_t size)
{
        u64 new;

        if (strict_strtoull(buf, 0, &new) < 0)
                return -EINVAL;

        attr_to_bank(attr)->ctl = new;
        mce_restart();

        return size;
}

static ssize_t
show_trigger(struct sys_device *s, struct sysdev_attribute *attr, char *buf)
{
        strcpy(buf, mce_helper);
        strcat(buf, "\n");
        return strlen(mce_helper) + 1;
}

static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
                           const char *buf, size_t siz)
{
        char *p;

        strncpy(mce_helper, buf, sizeof(mce_helper));
        mce_helper[sizeof(mce_helper)-1] = 0;
        p = strchr(mce_helper, '\n');

        if (p)
                *p = 0;

        return strlen(mce_helper) + !!p;
}
static ssize_t set_ignore_ce(struct sys_device *s,
                             struct sysdev_attribute *attr,
                             const char *buf, size_t size)
{
        u64 new;

        if (strict_strtoull(buf, 0, &new) < 0)
                return -EINVAL;

        if (mce_ignore_ce ^ !!new) {
                if (new) {
                        /* disable ce features */
                        mce_timer_delete_all();
                        on_each_cpu(mce_disable_cmci, NULL, 1);
                        mce_ignore_ce = 1;
                } else {
                        /* enable ce features */
                        mce_ignore_ce = 0;
                        on_each_cpu(mce_enable_ce, (void *)1, 1);
                }
        }
        return size;
}

static ssize_t set_cmci_disabled(struct sys_device *s,
                                 struct sysdev_attribute *attr,
                                 const char *buf, size_t size)
{
        u64 new;

        if (strict_strtoull(buf, 0, &new) < 0)
                return -EINVAL;

        if (mce_cmci_disabled ^ !!new) {
                if (new) {
                        /* disable cmci */
                        on_each_cpu(mce_disable_cmci, NULL, 1);
                        mce_cmci_disabled = 1;
                } else {
                        /* enable cmci */
                        mce_cmci_disabled = 0;
                        on_each_cpu(mce_enable_ce, NULL, 1);
                }
        }
        return size;
}

static ssize_t store_int_with_restart(struct sys_device *s,
                                      struct sysdev_attribute *attr,
                                      const char *buf, size_t size)
{
        ssize_t ret = sysdev_store_int(s, attr, buf, size);

        mce_restart();
        return ret;
}
static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
static SYSDEV_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
static SYSDEV_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);

static struct sysdev_ext_attribute attr_check_interval = {
        _SYSDEV_ATTR(check_interval, 0644, sysdev_show_int,
                     store_int_with_restart),
        &check_interval
};

static struct sysdev_ext_attribute attr_ignore_ce = {
        _SYSDEV_ATTR(ignore_ce, 0644, sysdev_show_int, set_ignore_ce),
        &mce_ignore_ce
};

static struct sysdev_ext_attribute attr_cmci_disabled = {
        _SYSDEV_ATTR(cmci_disabled, 0644, sysdev_show_int, set_cmci_disabled),
        &mce_cmci_disabled
};

static struct sysdev_attribute *mce_sysdev_attrs[] = {
        &attr_tolerant.attr,
        &attr_check_interval.attr,
        &attr_trigger,
        &attr_monarch_timeout.attr,
        &attr_dont_log_ce.attr,
        &attr_ignore_ce.attr,
        &attr_cmci_disabled.attr,
        NULL
};
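
/*
 * Illustrative shell usage (not part of this file): with the
 * "machinecheck" sysdev class above, these attributes appear per CPU
 * under /sys/devices/system/machinecheck/machinecheck<N>/.  For
 * example, as root:
 *
 *	echo 0 > /sys/devices/system/machinecheck/machinecheck0/check_interval
 *	echo /usr/sbin/mcelog > /sys/devices/system/machinecheck/machinecheck0/trigger
 *	cat /sys/devices/system/machinecheck/machinecheck0/bank0
 *
 * Writes to check_interval and the bank<M> controls go through
 * mce_restart(), so they take effect on all CPUs.
 */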
static cpumask_var_t mce_sysdev_initialized;

/* Per cpu sysdev init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_sysdev_create(unsigned int cpu)
{
        struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
        int err;
        int i, j;

        if (!mce_available(&boot_cpu_data))
                return -EIO;

        memset(&sysdev->kobj, 0, sizeof(struct kobject));
        sysdev->id  = cpu;
        sysdev->cls = &mce_sysdev_class;

        err = sysdev_register(sysdev);
        if (err)
                return err;

        for (i = 0; mce_sysdev_attrs[i]; i++) {
                err = sysdev_create_file(sysdev, mce_sysdev_attrs[i]);
                if (err)
                        goto error;
        }
        for (j = 0; j < banks; j++) {
                err = sysdev_create_file(sysdev, &mce_banks[j].attr);
                if (err)
                        goto error2;
        }
        cpumask_set_cpu(cpu, mce_sysdev_initialized);

        return 0;
error2:
        while (--j >= 0)
                sysdev_remove_file(sysdev, &mce_banks[j].attr);
error:
        while (--i >= 0)
                sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);

        sysdev_unregister(sysdev);

        return err;
}

static __cpuinit void mce_sysdev_remove(unsigned int cpu)
{
        struct sys_device *sysdev = &per_cpu(mce_sysdev, cpu);
        int i;

        if (!cpumask_test_cpu(cpu, mce_sysdev_initialized))
                return;

        for (i = 0; mce_sysdev_attrs[i]; i++)
                sysdev_remove_file(sysdev, mce_sysdev_attrs[i]);

        for (i = 0; i < banks; i++)
                sysdev_remove_file(sysdev, &mce_banks[i].attr);

        sysdev_unregister(sysdev);
        cpumask_clear_cpu(cpu, mce_sysdev_initialized);
}
/* Make sure there are no machine checks on offlined CPUs. */
static void __cpuinit mce_disable_cpu(void *h)
{
        unsigned long action = *(unsigned long *)h;
        int i;

        if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;

        if (!(action & CPU_TASKS_FROZEN))
                cmci_clear();
        for (i = 0; i < banks; i++) {
                struct mce_bank *b = &mce_banks[i];

                if (b->init)
                        wrmsrl(MSR_IA32_MCx_CTL(i), 0);
        }
}

static void __cpuinit mce_reenable_cpu(void *h)
{
        unsigned long action = *(unsigned long *)h;
        int i;

        if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;

        if (!(action & CPU_TASKS_FROZEN))
                cmci_reenable();
        for (i = 0; i < banks; i++) {
                struct mce_bank *b = &mce_banks[i];

                if (b->init)
                        wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
        }
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct timer_list *t = &per_cpu(mce_timer, cpu);

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                mce_sysdev_create(cpu);
                if (threshold_cpu_callback)
                        threshold_cpu_callback(action, cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                if (threshold_cpu_callback)
                        threshold_cpu_callback(action, cpu);
                mce_sysdev_remove(cpu);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                del_timer_sync(t);
                smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                if (!mce_ignore_ce && check_interval) {
                        t->expires = round_jiffies(jiffies +
                                        __get_cpu_var(mce_next_interval));
                        add_timer_on(t, cpu);
                }
                smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
                break;
        case CPU_POST_DEAD:
                /* intentionally ignoring frozen here */
                cmci_rediscover(cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
        .notifier_call = mce_cpu_callback,
};
static __init void mce_init_banks(void)
{
        int i;

        for (i = 0; i < banks; i++) {
                struct mce_bank *b = &mce_banks[i];
                struct sysdev_attribute *a = &b->attr;

                sysfs_attr_init(&a->attr);
                a->attr.name    = b->attrname;
                snprintf(b->attrname, ATTR_LEN, "bank%d", i);

                a->attr.mode    = 0644;
                a->show         = show_bank;
                a->store        = set_bank;
        }
}
static __init int mcheck_init_device(void)
{
        int err;
        int i = 0;

        if (!mce_available(&boot_cpu_data))
                return -EIO;

        zalloc_cpumask_var(&mce_sysdev_initialized, GFP_KERNEL);

        mce_init_banks();

        err = sysdev_class_register(&mce_sysdev_class);
        if (err)
                return err;

        for_each_online_cpu(i) {
                err = mce_sysdev_create(i);
                if (err)
                        return err;
        }

        register_syscore_ops(&mce_syscore_ops);
        register_hotcpu_notifier(&mce_cpu_notifier);

        /* register character device /dev/mcelog */
        misc_register(&mce_chrdev_device);

        return err;
}
device_initcall(mcheck_init_device);
/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
        mce_disabled = 1;
        return 1;
}
__setup("nomce", mcheck_disable);
#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
        static struct dentry *dmce;

        if (!dmce)
                dmce = debugfs_create_dir("mce", NULL);

        return dmce;
}

static void mce_reset(void)
{
        cpu_missing = 0;
        atomic_set(&mce_fake_paniced, 0);
        atomic_set(&mce_executing, 0);
        atomic_set(&mce_callin, 0);
        atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
        *val = fake_panic;
        return 0;
}

static int fake_panic_set(void *data, u64 val)
{
        mce_reset();
        fake_panic = val;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
                        fake_panic_set, "%llu\n");

static int __init mcheck_debugfs_init(void)
{
        struct dentry *dmce, *ffake_panic;

        dmce = mce_get_debugfs_dir();
        if (!dmce)
                return -ENOMEM;
        ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
                                          &fake_panic_fops);
        if (!ffake_panic)
                return -ENOMEM;

        return 0;
}
late_initcall(mcheck_debugfs_init);
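
/*
 * Illustrative usage (not part of this file), assuming debugfs is
 * mounted at /sys/kernel/debug:
 *
 *	cat /sys/kernel/debug/mce/fake_panic
 *	echo 1 > /sys/kernel/debug/mce/fake_panic	(as root)
 *
 * With fake_panic set, the MCE panic path can be exercised without
 * actually taking the machine down; note that fake_panic_set() also
 * resets the global MCE rendezvous state via mce_reset().
 */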
#endif