/*
 * Machine check handler.
 *
 * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
 * Rest from unknown author(s).
 * 2004 Andi Kleen. Rewrote most of it.
 * Copyright 2008 Intel Corporation
 * Author: Andi Kleen
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
#include <linux/ratelimit.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/irq_work.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>

#include "mce-internal.h"

static DEFINE_MUTEX(mce_chrdev_read_mutex);

#define rcu_dereference_check_mce(p) \
	rcu_dereference_index_check((p), \
			      rcu_read_lock_sched_held() || \
			      lockdep_is_held(&mce_chrdev_read_mutex))

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

int mce_disabled __read_mostly;

#define MISC_MCELOG_MINOR	227

#define SPINUNIT 100	/* 100ns */

atomic_t mce_entry;

DEFINE_PER_CPU(unsigned, mce_exception_count);

/*
 * Tolerant levels:
 *   0: always panic on uncorrected errors, log corrected errors
 *   1: panic or SIGBUS on uncorrected errors, log corrected errors
 *   2: SIGBUS or log uncorrected errors (if possible), log corrected errors
 *   3: never panic or SIGBUS, log all errors (for testing only)
 */
static int tolerant __read_mostly = 1;
static int banks __read_mostly;
static int rip_msr __read_mostly;
static int mce_bootlog __read_mostly = -1;
static int monarch_timeout __read_mostly = -1;
static int mce_panic_timeout __read_mostly;
static int mce_dont_log_ce __read_mostly;
int mce_cmci_disabled __read_mostly;
int mce_ignore_ce __read_mostly;
int mce_ser __read_mostly;

struct mce_bank *mce_banks __read_mostly;

/* User mode helper program triggered by machine check event */
static unsigned long mce_need_notify;
static char mce_helper[128];
static char *mce_helper_argv[2] = { mce_helper, NULL };

static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);

static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;

/* MCA banks polled by the periodic polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};

static DEFINE_PER_CPU(struct work_struct, mce_work);

/*
 * CPU/chipset specific EDAC code can register a notifier call here to print
 * MCE errors in a human-readable form.
 */
ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);

/* Do initial initialization of a struct mce */
void mce_setup(struct mce *m)
{
	memset(m, 0, sizeof(struct mce));
	m->cpu = m->extcpu = smp_processor_id();
	rdtscll(m->tsc);
	/* We hope get_seconds stays lockless */
	m->time = get_seconds();
	m->cpuvendor = boot_cpu_data.x86_vendor;
	m->cpuid = cpuid_eax(1);
	m->socketid = cpu_data(m->extcpu).phys_proc_id;
	m->apicid = cpu_data(m->extcpu).initial_apicid;
	rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}

DEFINE_PER_CPU(struct mce, injectm);
EXPORT_PER_CPU_SYMBOL_GPL(injectm);

/*
 * Lockless MCE logging infrastructure.
 * This avoids deadlocks on printk locks without having to break locks. Also
 * separate MCEs from kernel messages to avoid bogus bug reports.
 */
static struct mce_log mcelog = {
	.signature	= MCE_LOG_SIGNATURE,
	.len		= MCE_LOG_LEN,
	.recordlen	= sizeof(struct mce),
};

void mce_log(struct mce *mce)
{
	unsigned next, entry;
	int ret = 0;

	/* Emit the trace record: */
	trace_mce_record(mce);

	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
	if (ret == NOTIFY_STOP)
		return;

	mce->finished = 0;
	wmb();
	for (;;) {
		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {
			/*
			 * When the buffer fills up discard new entries.
			 * Assume that the earlier errors are the more
			 * interesting ones:
			 */
			if (entry >= MCE_LOG_LEN) {
				set_bit(MCE_OVERFLOW,
					(unsigned long *)&mcelog.flags);
				return;
			}
			/* Old left over entry. Skip: */
			if (mcelog.entry[entry].finished) {
				entry++;
				continue;
			}
			break;
		}
		smp_rmb();
		next = entry + 1;
		if (cmpxchg(&mcelog.next, entry, next) == entry)
			break;
	}
	memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
	wmb();
	mcelog.entry[entry].finished = 1;
	wmb();

	mce->finished = 1;
	set_bit(0, &mce_need_notify);
}

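/*
 * Illustrative trace of the slot reservation above (a sketch, not
 * extra protocol): two CPUs racing both read mcelog.next == 5 and both
 * attempt cmpxchg(&mcelog.next, 5, 6); one wins and owns entry 5, the
 * loser re-reads next == 6 and claims entry 6 instead.  Readers only
 * trust an entry once ->finished is set, which is why the record is
 * copied in full before the final wmb()/finished = 1 pair.
 */
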
static void drain_mcelog_buffer(void)
{
	unsigned int next, i, prev = 0;

	next = ACCESS_ONCE(mcelog.next);

	do {
		struct mce *m;

		/* drain what was logged during boot */
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			unsigned retries = 1;

			m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2*retries))
					retries++;

				cpu_relax();

				if (!m->finished && retries >= 4) {
					pr_err("skipping error being logged currently!\n");
					break;
				}
			}
			smp_rmb();
			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
		}

		memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);
}

void mce_register_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
	drain_mcelog_buffer();
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);

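/*
 * Illustrative sketch (not part of this file): a decoder module such
 * as an EDAC driver hooks into the chain above with an ordinary
 * notifier.  The names my_decode/my_nb are hypothetical; only the
 * callback signature and the NOTIFY_STOP convention come from the
 * code here (print_mce() skips its "mcelog --ascii" hint when a
 * callback returns NOTIFY_STOP):
 *
 *	static int my_decode(struct notifier_block *nb,
 *			     unsigned long val, void *data)
 *	{
 *		struct mce *m = data;
 *
 *		pr_info("decoded: bank %d status 0x%llx\n",
 *			m->bank, m->status);
 *		return NOTIFY_STOP;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_decode };
 *
 * followed by mce_register_decode_chain(&my_nb) at module init.
 */
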
void mce_unregister_decode_chain(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);

static void print_mce(struct mce *m)
{
	int ret = 0;

	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
	       m->extcpu, m->mcgstatus, m->bank, m->status);

	if (m->ip) {
		pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
			!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
			m->cs, m->ip);

		if (m->cs == __KERNEL_CS)
			print_symbol("{%s}", m->ip);
		pr_cont("\n");
	}

	pr_emerg(HW_ERR "TSC %llx ", m->tsc);
	if (m->addr)
		pr_cont("ADDR %llx ", m->addr);
	if (m->misc)
		pr_cont("MISC %llx ", m->misc);

	pr_cont("\n");
	/*
	 * Note this output is parsed by external tools and old fields
	 * should not be changed.
	 */
	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
		cpu_data(m->extcpu).microcode);

	/*
	 * Print out human-readable details about the MCE error
	 * (if the CPU has an implementation for that).
	 */
	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
	if (ret == NOTIFY_STOP)
		return;

	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
}

#define PANIC_TIMEOUT 5 /* 5 seconds */

static atomic_t mce_paniced;

static int fake_panic;
static atomic_t mce_fake_paniced;

/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
{
	long timeout = PANIC_TIMEOUT*USEC_PER_SEC;

	preempt_disable();
	local_irq_enable();
	while (timeout-- > 0)
		udelay(1);
	if (panic_timeout == 0)
		panic_timeout = mce_panic_timeout;
	panic("Panicking because a machine check CPU died");
}

static void mce_panic(char *msg, struct mce *final, char *exp)
{
	int i, apei_err = 0;

	if (!fake_panic) {
		/*
		 * Make sure only one CPU runs in machine check panic
		 */
		if (atomic_inc_return(&mce_paniced) > 1)
			wait_for_panic();
		barrier();

		bust_spinlocks(1);
		console_verbose();
	} else {
		/* Don't log too much for fake panic */
		if (atomic_inc_return(&mce_fake_paniced) > 1)
			return;
	}
	/* First print corrected ones that are still unlogged */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC)) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	/* Now print uncorrected but with the final one last */
	for (i = 0; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (!(m->status & MCI_STATUS_VAL))
			continue;
		if (!(m->status & MCI_STATUS_UC))
			continue;
		if (!final || memcmp(m, final, sizeof(struct mce))) {
			print_mce(m);
			if (!apei_err)
				apei_err = apei_write_mce(m);
		}
	}
	if (final) {
		print_mce(final);
		if (!apei_err)
			apei_err = apei_write_mce(final);
	}
	if (cpu_missing)
		pr_emerg(HW_ERR "Some CPUs didn't answer in synchronization\n");
	if (exp)
		pr_emerg(HW_ERR "Machine check: %s\n", exp);
	if (!fake_panic) {
		if (panic_timeout == 0)
			panic_timeout = mce_panic_timeout;
		panic(msg);
	} else
		pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}

/* Support code for software error injection */

static int msr_to_offset(u32 msr)
{
	unsigned bank = __this_cpu_read(injectm.bank);

	if (msr == rip_msr)
		return offsetof(struct mce, ip);
	if (msr == MSR_IA32_MCx_STATUS(bank))
		return offsetof(struct mce, status);
	if (msr == MSR_IA32_MCx_ADDR(bank))
		return offsetof(struct mce, addr);
	if (msr == MSR_IA32_MCx_MISC(bank))
		return offsetof(struct mce, misc);
	if (msr == MSR_IA32_MCG_STATUS)
		return offsetof(struct mce, mcgstatus);
	return -1;
}

/* MSR access wrappers used for error injection */
static u64 mce_rdmsrl(u32 msr)
{
	u64 v;

	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset < 0)
			return 0;
		return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
	}

	if (rdmsrl_safe(msr, &v)) {
		WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
		/*
		 * Return zero in case the access faulted. This should
		 * not happen normally but can happen if the CPU does
		 * something weird, or if the code is buggy.
		 */
		v = 0;
	}

	return v;
}

static void mce_wrmsrl(u32 msr, u64 v)
{
	if (__this_cpu_read(injectm.finished)) {
		int offset = msr_to_offset(msr);

		if (offset >= 0)
			*(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
		return;
	}
	wrmsrl(msr, v);
}

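/*
 * Sketch of how the wrappers above get used for injection (an
 * assumption based on this file, not a complete injector): a tool
 * like mce-inject fills the per-CPU "injectm" record and marks it
 * finished, after which mce_rdmsrl()/mce_wrmsrl() are served from the
 * struct mce fields instead of the real MSRs:
 *
 *	struct mce *i = &per_cpu(injectm, cpu);
 *
 *	i->bank = 1;
 *	i->status = MCI_STATUS_VAL | MCI_STATUS_UC;
 *	i->finished = 1;
 *
 * msr_to_offset() then maps e.g. MSR_IA32_MCx_STATUS(1) on that CPU
 * to offsetof(struct mce, status).
 */
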
/*
 * Collect all global (w.r.t. this processor) status about this machine
 * check into our "mce" struct so that we can use it later to assess
 * the severity of the problem as we read per-bank specific details.
 */
static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
{
	mce_setup(m);

	m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
	if (regs) {
		/*
		 * Get the address of the instruction at the time of
		 * the machine check error.
		 */
		if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
			m->ip = regs->ip;
			m->cs = regs->cs;

			/*
			 * When in VM86 mode make the cs look like ring 3
			 * always. This is a lie, but it's better than passing
			 * the additional vm86 bit around everywhere.
			 */
			if (v8086_mode(regs))
				m->cs |= 3;
		}
		/* Use accurate RIP reporting if available. */
		if (rip_msr)
			m->ip = mce_rdmsrl(rip_msr);
	}
}

/*
 * Simple lockless ring to communicate PFNs from the exception handler with the
 * process context work function. This is vastly simplified because there's
 * only a single reader and a single writer.
 */
#define MCE_RING_SIZE 16	/* we use one entry less */

struct mce_ring {
	unsigned short start;
	unsigned short end;
	unsigned long ring[MCE_RING_SIZE];
};
static DEFINE_PER_CPU(struct mce_ring, mce_ring);

/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);

	return r->start == r->end;
}

static int mce_ring_get(unsigned long *pfn)
{
	struct mce_ring *r;
	int ret = 0;

	*pfn = 0;
	get_cpu();
	r = &__get_cpu_var(mce_ring);
	if (r->start == r->end)
		goto out;
	*pfn = r->ring[r->start];
	r->start = (r->start + 1) % MCE_RING_SIZE;
	ret = 1;
out:
	put_cpu();
	return ret;
}

/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
	struct mce_ring *r = &__get_cpu_var(mce_ring);
	unsigned next;

	next = (r->end + 1) % MCE_RING_SIZE;
	if (next == r->start)
		return -1;
	r->ring[r->end] = pfn;
	wmb();
	r->end = next;
	return 0;
}

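/*
 * Worked example of the ring discipline above (illustrative): with
 * MCE_RING_SIZE == 16 and one slot deliberately wasted, at most 15
 * PFNs can be queued per CPU.  "start == end" means empty, and a
 * writer that would make (end + 1) % MCE_RING_SIZE == start returns
 * -1 instead of overwriting, so the single reader and single writer
 * never need a lock -- only the wmb() above to order the PFN store
 * before the index update.
 */
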
int mce_available(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return 0;
	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}

static void mce_schedule_work(void)
{
	if (!mce_ring_empty()) {
		struct work_struct *work = &__get_cpu_var(mce_work);

		if (!work_pending(work))
			schedule_work(work);
	}
}

DEFINE_PER_CPU(struct irq_work, mce_irq_work);

static void mce_irq_work_cb(struct irq_work *entry)
{
	mce_notify_irq();
	mce_schedule_work();
}

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		mce_notify_irq();
		/*
		 * Triggering the work queue here is just an insurance
		 * policy in case the syscall exit notify handler
		 * doesn't run soon enough or ends up running on the
		 * wrong CPU (can happen when audit sleeps)
		 */
		mce_schedule_work();
		return;
	}

	irq_work_queue(&__get_cpu_var(mce_irq_work));
}

/*
 * Read ADDR and MISC registers.
 */
static void mce_read_aux(struct mce *m, int i)
{
	if (m->status & MCI_STATUS_MISCV)
		m->misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
	if (m->status & MCI_STATUS_ADDRV) {
		m->addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));

		/*
		 * Mask the reported address by the reported granularity.
		 */
		if (mce_ser && (m->status & MCI_STATUS_MISCV)) {
			u8 shift = MCI_MISC_ADDR_LSB(m->misc);

			m->addr >>= shift;
			m->addr <<= shift;
		}
	}
}

DEFINE_PER_CPU(unsigned, mce_poll_count);

/*
 * Poll for corrected events or events that happened before reset.
 * Those are just logged through /dev/mcelog.
 *
 * This is executed in standard interrupt context.
 *
 * Note: the spec recommends panicking for fatal unsignalled
 * errors here. However this would be quite problematic --
 * we would need to reimplement the Monarch handling and
 * it would mess up the exclusion between the exception handler
 * and the poll handler -- so we skip this for now.
 * These cases should not happen anyway, or only when the CPU
 * is already totally confused. In that case it's likely the CPU will
 * not fully execute the machine check handler either.
 */
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
{
	struct mce m;
	int i;

	this_cpu_inc(mce_poll_count);

	mce_gather_info(&m, NULL);

	for (i = 0; i < banks; i++) {
		if (!mce_banks[i].ctl || !test_bit(i, *b))
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;
		m.tsc = 0;

		barrier();
		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (!(m.status & MCI_STATUS_VAL))
			continue;

		/*
		 * Uncorrected or signalled events are handled by the exception
		 * handler when it is enabled, so don't process those here.
		 *
		 * TBD do the same check for MCI_STATUS_EN here?
		 */
		if (!(flags & MCP_UC) &&
		    (m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)))
			continue;

		mce_read_aux(&m, i);

		if (!(flags & MCP_TIMESTAMP))
			m.tsc = 0;
		/*
		 * Don't get the IP here because it's unlikely to
		 * have anything to do with the actual error location.
		 */
		if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce)
			mce_log(&m);

		/*
		 * Clear state for this bank.
		 */
		mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}

	/*
	 * Don't clear MCG_STATUS here because it's only defined for
	 * exceptions.
	 */

	sync_core();
}
EXPORT_SYMBOL_GPL(machine_check_poll);

/*
 * Do a quick check if any of the events requires a panic.
 * This decides if we keep the events around or clear them.
 */
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
{
	int i, ret = 0;

	for (i = 0; i < banks; i++) {
		m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if (m->status & MCI_STATUS_VAL)
			__set_bit(i, validp);
		if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
			ret = 1;
	}
	return ret;
}

/*
 * Variable to establish order between CPUs while scanning.
 * Each CPU spins initially until executing is equal to its number.
 */
static atomic_t mce_executing;

/*
 * Defines order of CPUs on entry. First CPU becomes Monarch.
 */
static atomic_t mce_callin;

/*
 * Check if a timeout waiting for other CPUs happened.
 */
static int mce_timed_out(u64 *t)
{
	/*
	 * The others already did panic for some reason.
	 * Bail out like in a timeout.
	 * rmb() to tell the compiler that system_state
	 * might have been modified by someone else.
	 */
	rmb();
	if (atomic_read(&mce_paniced))
		wait_for_panic();
	if (!monarch_timeout)
		goto out;
	if ((s64)*t < SPINUNIT) {
		/* CHECKME: Make panic default for 1 too? */
		if (tolerant < 1)
			mce_panic("Timeout synchronizing machine check over CPUs",
				  NULL, NULL);
		cpu_missing = 1;
		return 1;
	}
	*t -= SPINUNIT;
out:
	touch_nmi_watchdog();
	return 0;
}

/*
 * The Monarch's reign. The Monarch is the CPU who entered
 * the machine check handler first. It waits for the others to
 * raise the exception too and then grades them. If any
 * error is fatal it panics. Only then does it let the others continue.
 *
 * The other CPUs entering the MCE handler will be controlled by the
 * Monarch. They are called Subjects.
 *
 * This way we prevent any potential data corruption in an unrecoverable case
 * and also make sure that all CPUs' errors are always examined.
 *
 * Also this detects the case of a machine check event coming from outer
 * space (not detected by any CPU). In this case some external agent wants
 * us to shut down, so panic too.
 *
 * The other CPUs might still decide to panic if the handler happens
 * in an unrecoverable place, but in this case the system is in a semi-stable
 * state and won't corrupt anything by itself. It's ok to let the others
 * continue for a bit first.
 *
 * All the spin loops have timeouts; when a timeout happens a CPU
 * typically elects itself to be Monarch.
 */
static void mce_reign(void)
{
	int cpu;
	struct mce *m = NULL;
	int global_worst = 0;
	char *msg = NULL;
	char *nmsg = NULL;

	/*
	 * This CPU is the Monarch and the other CPUs have run
	 * through their handlers.
	 * Grade the severity of the errors of all the CPUs.
	 */
	for_each_possible_cpu(cpu) {
		int severity = mce_severity(&per_cpu(mces_seen, cpu), tolerant,
					    &nmsg);
		if (severity > global_worst) {
			msg = nmsg;
			global_worst = severity;
			m = &per_cpu(mces_seen, cpu);
		}
	}

	/*
	 * Cannot recover? Panic here then.
	 * This dumps all the mces in the log buffer and stops the
	 * other CPUs.
	 */
	if (m && global_worst >= MCE_PANIC_SEVERITY && tolerant < 3)
		mce_panic("Fatal Machine check", m, msg);

	/*
	 * For UC somewhere we let the CPU who detects it handle it.
	 * Also must let the others continue, otherwise the handling
	 * CPU could deadlock on a lock.
	 */

	/*
	 * No machine check event found. Must be some external
	 * source or one CPU is hung. Panic.
	 */
	if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
		mce_panic("Machine check from unknown source", NULL, NULL);

	/*
	 * Now clear all the mces_seen so that they don't reappear on
	 * the next mce.
	 */
	for_each_possible_cpu(cpu)
		memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
}

static atomic_t global_nwo;

/*
 * Start of Monarch synchronization. This waits until all CPUs have
 * entered the exception handler and then determines if any of them
 * saw a fatal event that requires panic. Then it executes them
 * in the entry order.
 * TBD double check parallel CPU hotunplug
 */
static int mce_start(int *no_way_out)
{
	int order;
	int cpus = num_online_cpus();
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		return -1;

	atomic_add(*no_way_out, &global_nwo);
	/*
	 * global_nwo should be updated before mce_callin
	 */
	smp_wmb();
	order = atomic_inc_return(&mce_callin);

	/*
	 * Wait for everyone.
	 */
	while (atomic_read(&mce_callin) != cpus) {
		if (mce_timed_out(&timeout)) {
			atomic_set(&global_nwo, 0);
			return -1;
		}
		ndelay(SPINUNIT);
	}

	/*
	 * mce_callin should be read before global_nwo
	 */
	smp_rmb();

	if (order == 1) {
		/*
		 * Monarch: Starts executing now, the others wait.
		 */
		atomic_set(&mce_executing, 1);
	} else {
		/*
		 * Subject: Now start the scanning loop one by one in
		 * the original callin order.
		 * This way when there are any shared banks it will be
		 * only seen by one CPU before cleared, avoiding duplicates.
		 */
		while (atomic_read(&mce_executing) < order) {
			if (mce_timed_out(&timeout)) {
				atomic_set(&global_nwo, 0);
				return -1;
			}
			ndelay(SPINUNIT);
		}
	}

	/*
	 * Cache the global no_way_out state.
	 */
	*no_way_out = atomic_read(&global_nwo);

	return order;
}

/*
 * Synchronize between CPUs after main scanning loop.
 * This invokes the bulk of the Monarch processing.
 */
static int mce_end(int order)
{
	int ret = -1;
	u64 timeout = (u64)monarch_timeout * NSEC_PER_USEC;

	if (!timeout)
		goto reset;
	if (order < 0)
		goto reset;

	/*
	 * Allow others to run.
	 */
	atomic_inc(&mce_executing);

	if (order == 1) {
		/* CHECKME: Can this race with a parallel hotplug? */
		int cpus = num_online_cpus();

		/*
		 * Monarch: Wait for everyone to go through their scanning
		 * loops.
		 */
		while (atomic_read(&mce_executing) <= cpus) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		mce_reign();
		barrier();
		ret = 0;
	} else {
		/*
		 * Subject: Wait for Monarch to finish.
		 */
		while (atomic_read(&mce_executing) != 0) {
			if (mce_timed_out(&timeout))
				goto reset;
			ndelay(SPINUNIT);
		}

		/*
		 * Don't reset anything. That's done by the Monarch.
		 */
		return 0;
	}

	/*
	 * Reset all global state.
	 */
reset:
	atomic_set(&global_nwo, 0);
	atomic_set(&mce_callin, 0);
	barrier();

	/*
	 * Let others run again.
	 */
	atomic_set(&mce_executing, 0);
	return ret;
}

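/*
 * Illustrative timeline of the rendezvous for two CPUs (a sketch
 * derived from mce_start()/mce_end() above, not extra protocol):
 *
 *	CPU A (order == 1, Monarch)	CPU B (order == 2, Subject)
 *	mce_start(): calls in, waits	mce_start(): calls in, waits
 *	sets mce_executing = 1		spins while mce_executing < 2
 *	scans its banks			(still spinning)
 *	mce_end(): executing -> 2,	scans its banks
 *	waits for executing > cpus	mce_end(): executing -> 3, then
 *	mce_reign(), resets state	spins until mce_executing == 0
 */
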
/*
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
 */
static int mce_usable_address(struct mce *m)
{
	if (!(m->status & MCI_STATUS_MISCV) || !(m->status & MCI_STATUS_ADDRV))
		return 0;
	if (MCI_MISC_ADDR_LSB(m->misc) > PAGE_SHIFT)
		return 0;
	if (MCI_MISC_ADDR_MODE(m->misc) != MCI_MISC_ADDR_PHYS)
		return 0;
	return 1;
}

static void mce_clear_state(unsigned long *toclear)
{
	int i;

	for (i = 0; i < banks; i++) {
		if (test_bit(i, toclear))
			mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/*
 * Need to save faulting physical address associated with a process
 * in the machine check handler some place where we can grab it back
 * later in mce_notify_process()
 */
#define MCE_INFO_MAX	16

struct mce_info {
	atomic_t		inuse;
	struct task_struct	*t;
	__u64			paddr;
	int			restartable;
} mce_info[MCE_INFO_MAX];

static void mce_save_info(__u64 addr, int c)
{
	struct mce_info *mi;

	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) {
		if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
			mi->t = current;
			mi->paddr = addr;
			mi->restartable = c;
			return;
		}
	}

	mce_panic("Too many concurrent recoverable errors", NULL, NULL);
}

static struct mce_info *mce_find_info(void)
{
	struct mce_info *mi;

	for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++)
		if (atomic_read(&mi->inuse) && mi->t == current)
			return mi;
	return NULL;
}

static void mce_clear_info(struct mce_info *mi)
{
	atomic_set(&mi->inuse, 0);
}

/*
 * The actual machine check handler. This only handles real
 * exceptions when something got corrupted coming in through int 18.
 *
 * This is executed in NMI context not subject to normal locking rules. This
 * implies that most kernel services cannot be safely used. Don't even
 * think about putting a printk in there!
 *
 * On Intel systems this is entered on all CPUs in parallel through
 * MCE broadcast. However some CPUs might be broken beyond repair,
 * so always be careful when synchronizing with others.
 */
void do_machine_check(struct pt_regs *regs, long error_code)
{
	struct mce m, *final;
	int i;
	int worst = 0;
	int severity;
	/*
	 * Establish sequential order between the CPUs entering the machine
	 * check handler.
	 */
	int order;
	/*
	 * If no_way_out gets set, there is no safe way to recover from this
	 * MCE.  If tolerant is cranked up, we'll try anyway.
	 */
	int no_way_out = 0;
	/*
	 * If kill_it gets set, there might be a way to recover from this
	 * error.
	 */
	int kill_it = 0;
	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
	DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
	char *msg = "Unknown";

	atomic_inc(&mce_entry);

	this_cpu_inc(mce_exception_count);

	if (!banks)
		goto out;

	mce_gather_info(&m, regs);

	final = &__get_cpu_var(mces_seen);
	*final = m;

	memset(valid_banks, 0, sizeof(valid_banks));
	no_way_out = mce_no_way_out(&m, &msg, valid_banks);

	barrier();

	/*
	 * If there is no restart IP we might need to kill or panic.
	 * Assume the worst for now, but if we find the
	 * severity is MCE_AR_SEVERITY we have other options.
	 */
	if (!(m.mcgstatus & MCG_STATUS_RIPV))
		kill_it = 1;

	/*
	 * Go through all the banks in exclusion of the other CPUs.
	 * This way we don't report duplicated events on shared banks
	 * because the first one to see it will clear it.
	 */
	order = mce_start(&no_way_out);
	for (i = 0; i < banks; i++) {
		__clear_bit(i, toclear);
		if (!test_bit(i, valid_banks))
			continue;
		if (!mce_banks[i].ctl)
			continue;

		m.misc = 0;
		m.addr = 0;
		m.bank = i;

		m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
		if ((m.status & MCI_STATUS_VAL) == 0)
			continue;

		/*
		 * Errors that are neither uncorrected nor signaled are
		 * handled by machine_check_poll. Leave them alone, unless
		 * this panics.
		 */
		if (!(m.status & (mce_ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
			!no_way_out)
			continue;

		/*
		 * Set taint even when machine check was not enabled.
		 */
		add_taint(TAINT_MACHINE_CHECK);

		severity = mce_severity(&m, tolerant, NULL);

		/*
		 * If the machine check is destined for the corrected-error
		 * (poll) handler, don't touch it here unless we're panicking.
		 */
		if (severity == MCE_KEEP_SEVERITY && !no_way_out)
			continue;
		__set_bit(i, toclear);

		if (severity == MCE_NO_SEVERITY) {
			/*
			 * Machine check event was not enabled. Clear, but
			 * ignore.
			 */
			continue;
		}

		mce_read_aux(&m, i);

		/*
		 * Action optional error. Queue address for later processing.
		 * When the ring overflows we just ignore the AO error.
		 * RED-PEN add some logging mechanism when
		 * usable_address or mce_add_ring fails.
		 * RED-PEN don't ignore overflow for tolerant == 0
		 */
		if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
			mce_ring_add(m.addr >> PAGE_SHIFT);

		mce_log(&m);

		if (severity > worst) {
			*final = m;
			worst = severity;
		}
	}

	/* mce_clear_state will clear *final, save locally for use later */
	m = *final;

	if (!no_way_out)
		mce_clear_state(toclear);

	/*
	 * Do most of the synchronization with other CPUs.
	 * When there's any problem use only local no_way_out state.
	 */
	if (mce_end(order) < 0)
		no_way_out = worst >= MCE_PANIC_SEVERITY;

	/*
	 * At insane "tolerant" levels we take no action. Otherwise
	 * we only die if we have no other choice. For less serious
	 * issues we try to recover, or limit damage to the current
	 * process.
	 */
	if (tolerant < 3) {
		if (no_way_out)
			mce_panic("Fatal machine check on current CPU", &m, msg);
		if (worst == MCE_AR_SEVERITY) {
			/* schedule action before return to userland */
			mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
			set_thread_flag(TIF_MCE_NOTIFY);
		} else if (kill_it) {
			force_sig(SIGBUS, current);
		}
	}

	if (worst > 0)
		mce_report_event(regs);
	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
out:
	atomic_dec(&mce_entry);
	sync_core();
}
EXPORT_SYMBOL_GPL(do_machine_check);

#ifndef CONFIG_MEMORY_FAILURE
int memory_failure(unsigned long pfn, int vector, int flags)
{
	/* mce_severity() should not hand us an ACTION_REQUIRED error */
	BUG_ON(flags & MF_ACTION_REQUIRED);
	pr_err("Uncorrected memory error in page 0x%lx ignored\n"
	       "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
	       pfn);

	return 0;
}
#endif

/*
 * Called in the process context that was interrupted by the MCE and
 * marked with TIF_MCE_NOTIFY, just before returning to erroneous userland.
 * This code is allowed to sleep.
 * Attempt possible recovery such as calling the high level VM handler to
 * process any corrupted pages, and kill/signal current process if required.
 * Action required errors are handled here.
 */
void mce_notify_process(void)
{
	unsigned long pfn;
	struct mce_info *mi = mce_find_info();
	int flags = MF_ACTION_REQUIRED;

	if (!mi)
		mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
	pfn = mi->paddr >> PAGE_SHIFT;

	clear_thread_flag(TIF_MCE_NOTIFY);

	pr_err("Uncorrected hardware memory error in user-access at %llx\n",
		 mi->paddr);
	/*
	 * We must call memory_failure() here even if the current process is
	 * doomed. We still need to mark the page as poisoned and alert any
	 * other users of the page.
	 */
	if (!mi->restartable)
		flags |= MF_MUST_KILL;
	if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
		pr_err("Memory error not recovered\n");
		force_sig(SIGBUS, current);
	}
	mce_clear_info(mi);
}

/*
 * Action optional processing happens here (picking up
 * from the list of faulting pages that do_machine_check()
 * placed into the "ring").
 */
static void mce_process_work(struct work_struct *dummy)
{
	unsigned long pfn;

	while (mce_ring_get(&pfn))
		memory_failure(pfn, MCE_VECTOR, 0);
}

#ifdef CONFIG_X86_MCE_INTEL
/**
 * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
 * @status: Event status information
 *
 * This function should be called by the thermal interrupt after the
 * event has been processed and the decision was made to log the event
 * further.
 *
 * The status parameter will be saved to the 'status' field of 'struct mce'
 * and historically has been the register value of the
 * MSR_IA32_THERMAL_STATUS (Intel) msr.
 */
void mce_log_therm_throt_event(__u64 status)
{
	struct mce m;

	mce_setup(&m);
	m.bank = MCE_THERMAL_BANK;
	m.status = status;
	mce_log(&m);
}
#endif /* CONFIG_X86_MCE_INTEL */

/*
 * Periodic polling timer for "silent" machine check errors.  If the
 * poller finds an MCE, poll 2x faster.  When the poller finds no more
 * errors, poll 2x slower (up to check_interval seconds).
 */
static unsigned long check_interval = 5 * 60; /* 5 minutes */

static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static void mce_timer_fn(unsigned long data)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	unsigned long iv;

	WARN_ON(smp_processor_id() != data);

	if (mce_available(__this_cpu_ptr(&cpu_info))) {
		machine_check_poll(MCP_TIMESTAMP,
				&__get_cpu_var(mce_poll_banks));
	}

	/*
	 * Alert userspace if needed. If we logged an MCE, reduce the
	 * polling interval, otherwise increase the polling interval.
	 */
	iv = __this_cpu_read(mce_next_interval);
	if (mce_notify_irq())
		iv = max(iv / 2, (unsigned long) HZ/100);
	else
		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
	__this_cpu_write(mce_next_interval, iv);

	t->expires = jiffies + iv;
	add_timer_on(t, smp_processor_id());
}

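/*
 * Worked example of the backoff above (illustrative, with HZ == 1000):
 * starting from check_interval = 5 * 60 seconds, every run that logs
 * an event halves iv down to the HZ/100 floor (10ms), and every quiet
 * run doubles it back up to the ~300 second ceiling, so a burst of
 * corrected errors is sampled quickly without leaving the poller hot
 * forever.
 */
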
/* Must not be called in IRQ context where del_timer_sync() can deadlock */
static void mce_timer_delete_all(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		del_timer_sync(&per_cpu(mce_timer, cpu));
}

static void mce_do_trigger(struct work_struct *work)
{
	call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT);
}

static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

/*
 * Notify the user(s) about new machine check events.
 * Can be called from interrupt context, but not from machine check/NMI
 * context.
 */
int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	if (test_and_clear_bit(0, &mce_need_notify)) {
		/* wake processes polling /dev/mcelog */
		wake_up_interruptible(&mce_chrdev_wait);

		/*
		 * There is no risk of missing notifications because
		 * work_pending is always cleared before the function is
		 * executed.
		 */
		if (mce_helper[0] && !work_pending(&mce_trigger_work))
			schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			pr_info(HW_ERR "Machine check events logged\n");

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

static int __cpuinit __mcheck_cpu_mce_banks_init(void)
{
	int i;

	mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
	if (!mce_banks)
		return -ENOMEM;
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		b->ctl = -1ULL;
		b->init = 1;
	}
	return 0;
}

/*
 * Initialize Machine Checks for a CPU.
 */
static int __cpuinit __mcheck_cpu_cap_init(void)
{
	unsigned b;
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);

	b = cap & MCG_BANKCNT_MASK;
	if (!banks)
		pr_info("CPU supports %d MCE banks\n", b);

	if (b > MAX_NR_BANKS) {
		pr_warn("Using only %u machine check banks out of %u\n",
			MAX_NR_BANKS, b);
		b = MAX_NR_BANKS;
	}

	/* Don't support asymmetric configurations today */
	WARN_ON(banks != 0 && b != banks);
	banks = b;
	if (!mce_banks) {
		int err = __mcheck_cpu_mce_banks_init();

		if (err)
			return err;
	}

	/* Use accurate RIP reporting if available. */
	if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
		rip_msr = MSR_IA32_MCG_EIP;

	if (cap & MCG_SER_P)
		mce_ser = 1;

	return 0;
}

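/*
 * Worked decode for the code above (an illustrative value, bit layout
 * per the SDM's IA32_MCG_CAP): cap == 0x01000109 would give banks = 9
 * (low byte, MCG_BANKCNT_MASK), MCG_CTL_P set (so the generic init
 * writes MCG_CTL) and MCG_SER_P set (so mce_ser = 1), while rip_msr
 * stays 0 because MCG_EXT_P is clear and MCG_EXT_CNT is 0, below the
 * required 9.
 */
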
static void __mcheck_cpu_init_generic(void)
{
	mce_banks_t all_banks;
	u64 cap;
	int i;

	/*
	 * Log the machine checks left over from the previous reset.
	 */
	bitmap_fill(all_banks, MAX_NR_BANKS);
	machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);

	set_in_cr4(X86_CR4_MCE);

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (cap & MCG_CTL_P)
		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (!b->init)
			continue;
		wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
		wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
	}
}

/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
		pr_info("unknown CPU type - not enabling MCE support\n");
		return -EOPNOTSUPP;
	}

	/* This should be disabled by the BIOS, but isn't always */
	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (c->x86 == 15 && banks > 4) {
			/*
			 * disable GART TBL walk error reporting, which
			 * trips off incorrectly with the IOMMU & 3ware
			 * & Cerberus:
			 */
			clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
		}
		if (c->x86 <= 17 && mce_bootlog < 0) {
			/*
			 * Lots of broken BIOSes around that don't clear them
			 * by default and leave crap in there. Don't log:
			 */
			mce_bootlog = 0;
		}
		/*
		 * Various K7s with broken bank 0 around. Always disable
		 * by default.
		 */
		if (c->x86 == 6 && banks > 0)
			mce_banks[0].ctl = 0;

		/*
		 * Turn off MC4_MISC thresholding banks on those models since
		 * they're not supported there.
		 */
		if (c->x86 == 0x15 &&
		    (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) {
			int i;
			u64 val, hwcr;
			bool need_toggle;
			u32 msrs[] = {
				0x00000413, /* MC4_MISC0 */
				0xc0000408, /* MC4_MISC1 */
			};

			rdmsrl(MSR_K7_HWCR, hwcr);

			/* McStatusWrEn has to be set */
			need_toggle = !(hwcr & BIT(18));

			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

			for (i = 0; i < ARRAY_SIZE(msrs); i++) {
				rdmsrl(msrs[i], val);

				/* CntP bit set? */
				if (val & BIT_64(62)) {
					val &= ~BIT_64(62);
					wrmsrl(msrs[i], val);
				}
			}

			/* restore old settings */
			if (need_toggle)
				wrmsrl(MSR_K7_HWCR, hwcr);
		}
	}

	if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * SDM documents that on family 6 bank 0 should not be written
		 * because it aliases to another special BIOS controlled
		 * register.
		 * But it's not aliased anymore on model 0x1a+
		 * Don't ignore bank 0 completely because there could be a
		 * valid event later, merely don't write CTL0.
		 */
		if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
			mce_banks[0].init = 0;

		/*
		 * All newer Intel systems support MCE broadcasting. Enable
		 * synchronization with a one second timeout.
		 */
		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
			monarch_timeout < 0)
			monarch_timeout = USEC_PER_SEC;

		/*
		 * There are also broken BIOSes on some Pentium M and
		 * earlier systems:
		 */
		if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
			mce_bootlog = 0;
	}
	if (monarch_timeout < 0)
		monarch_timeout = 0;
	if (mce_bootlog != 0)
		mce_panic_timeout = 30;

	return 0;
}

static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
	if (c->x86 != 5)
		return 0;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		intel_p5_mcheck_init(c);
		return 1;
	case X86_VENDOR_CENTAUR:
		winchip_mcheck_init(c);
		return 1;
	}

	return 0;
}

static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		mce_intel_feature_init(c);
		break;
	case X86_VENDOR_AMD:
		mce_amd_feature_init(c);
		break;
	default:
		break;
	}
}

static void __mcheck_cpu_init_timer(void)
{
	struct timer_list *t = &__get_cpu_var(mce_timer);
	unsigned long iv = check_interval * HZ;

	setup_timer(t, mce_timer_fn, smp_processor_id());

	if (mce_ignore_ce)
		return;

	__this_cpu_write(mce_next_interval, iv);
	if (!iv)
		return;

	t->expires = round_jiffies(jiffies + iv);
	add_timer_on(t, smp_processor_id());
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
	pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
	       smp_processor_id());
}

/* Call the installed machine check handler for this CPU setup. */
void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;

/*
 * Called for each booted CPU to set up machine checks.
 * Must be called with preempt off:
 */
void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
{
	if (mce_disabled)
		return;

	if (__mcheck_cpu_ancient_init(c))
		return;

	if (!mce_available(c))
		return;

	if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
		mce_disabled = 1;
		return;
	}

	machine_check_vector = do_machine_check;

	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(c);
	__mcheck_cpu_init_timer();
	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
	init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
}

/*
 * mce_chrdev: Character device /dev/mcelog to read and clear the MCE log.
 */
static DEFINE_SPINLOCK(mce_chrdev_state_lock);
static int mce_chrdev_open_count;	/* #times opened */
static int mce_chrdev_open_exclu;	/* already open exclusive? */

static int mce_chrdev_open(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	if (mce_chrdev_open_exclu ||
	    (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&mce_chrdev_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		mce_chrdev_open_exclu = 1;
	mce_chrdev_open_count++;

	spin_unlock(&mce_chrdev_state_lock);

	return nonseekable_open(inode, file);
}

static int mce_chrdev_release(struct inode *inode, struct file *file)
{
	spin_lock(&mce_chrdev_state_lock);

	mce_chrdev_open_count--;
	mce_chrdev_open_exclu = 0;

	spin_unlock(&mce_chrdev_state_lock);

	return 0;
}

static void collect_tscs(void *data)
{
	unsigned long *cpu_tsc = (unsigned long *)data;

	rdtscll(cpu_tsc[smp_processor_id()]);
}


static int mce_apei_read_done;

/* Collect MCE records of the previous boot from persistent storage via APEI ERST. */
static int __mce_read_apei(char __user **ubuf, size_t usize)
{
	int rc;
	u64 record_id;
	struct mce m;

	if (usize < sizeof(struct mce))
		return -EINVAL;

	rc = apei_read_mce(&m, &record_id);
	/* Error or no more MCE record */
	if (rc <= 0) {
		mce_apei_read_done = 1;
		/*
		 * When ERST is disabled, mce_chrdev_read() should return
		 * "no record" instead of "no device."
		 */
		if (rc == -ENODEV)
			return 0;
		return rc;
	}
	rc = -EFAULT;
	if (copy_to_user(*ubuf, &m, sizeof(struct mce)))
		return rc;
	/*
	 * Ideally the record would be cleared only after it has been
	 * flushed to disk or sent over the network by /sbin/mcelog, but
	 * there is no interface for that yet, so clear it here to avoid
	 * duplicates.
	 */
	rc = apei_clear_mce(record_id);
	if (rc) {
		mce_apei_read_done = 1;
		return rc;
	}
	*ubuf += sizeof(struct mce);

	return 0;
}
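
/*
 * Reader for /dev/mcelog. The log is filled lock-free from machine check
 * context, so each entry carries a "finished" flag and mcelog.next is
 * claimed with cmpxchg(); the smp_rmb() pairs with the writer's ordering
 * so an entry's payload is only copied out after "finished" is seen set.
 */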
static ssize_t mce_chrdev_read(struct file *filp, char __user *ubuf,
			       size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned long *cpu_tsc;
	unsigned prev, next;
	int i, err;

	cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
	if (!cpu_tsc)
		return -ENOMEM;

	mutex_lock(&mce_chrdev_read_mutex);

	if (!mce_apei_read_done) {
		err = __mce_read_apei(&buf, usize);
		if (err || buf != ubuf)
			goto out;
	}

	next = rcu_dereference_check_mce(mcelog.next);

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
		goto out;

	err = 0;
	prev = 0;
	do {
		for (i = prev; i < next; i++) {
			unsigned long start = jiffies;
			struct mce *m = &mcelog.entry[i];

			while (!m->finished) {
				if (time_after_eq(jiffies, start + 2)) {
					memset(m, 0, sizeof(*m));
					goto timeout;
				}
				cpu_relax();
			}
			smp_rmb();
			err |= copy_to_user(buf, m, sizeof(*m));
			buf += sizeof(*m);
timeout:
			;
		}

		memset(mcelog.entry + prev, 0,
		       (next - prev) * sizeof(struct mce));
		prev = next;
		next = cmpxchg(&mcelog.next, prev, 0);
	} while (next != prev);

	synchronize_sched();

	/*
	 * Collect entries that were still getting written before the
	 * synchronize.
	 */
	on_each_cpu(collect_tscs, cpu_tsc, 1);

	for (i = next; i < MCE_LOG_LEN; i++) {
		struct mce *m = &mcelog.entry[i];

		if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
			err |= copy_to_user(buf, m, sizeof(*m));
			smp_rmb();
			buf += sizeof(*m);
			memset(m, 0, sizeof(*m));
		}
	}

	if (err)
		err = -EFAULT;

out:
	mutex_unlock(&mce_chrdev_read_mutex);
	kfree(cpu_tsc);

	return err ? err : buf - ubuf;
}

static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_chrdev_wait, wait);
	if (rcu_access_index(mcelog.next))
		return POLLIN | POLLRDNORM;
	if (!mce_apei_read_done && apei_check_mce())
		return POLLIN | POLLRDNORM;
	return 0;
}

static long mce_chrdev_ioctl(struct file *f, unsigned int cmd,
			     unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = mcelog.flags;
		} while (cmpxchg(&mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}
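
/*
 * A minimal user-space sketch (illustrative only, not part of this file):
 * since mce_chrdev_read() only supports full reads, a privileged consumer
 * sizes its buffer from the two length ioctls and reads everything at once:
 *
 *	int fd = open("/dev/mcelog", O_RDONLY);
 *	int recsize, loglen;
 *	ioctl(fd, MCE_GET_RECORD_LEN, &recsize);
 *	ioctl(fd, MCE_GET_LOG_LEN, &loglen);
 *	char *buf = malloc(recsize * loglen);
 *	ssize_t n = read(fd, buf, recsize * loglen); // n/recsize records
 */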

static ssize_t (*mce_write)(struct file *filp, const char __user *ubuf,
			    size_t usize, loff_t *off);

void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
			     const char __user *ubuf,
			     size_t usize, loff_t *off))
{
	mce_write = fn;
}
EXPORT_SYMBOL_GPL(register_mce_write_callback);
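
/*
 * Writes to /dev/mcelog are delegated to whoever registered a callback
 * above (in-tree, the mce-inject test module does); with no writer
 * registered, writing fails with -EINVAL.
 */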
ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
			 size_t usize, loff_t *off)
{
	if (mce_write)
		return mce_write(filp, ubuf, usize, off);
	else
		return -EINVAL;
}

static const struct file_operations mce_chrdev_ops = {
	.open			= mce_chrdev_open,
	.release		= mce_chrdev_release,
	.read			= mce_chrdev_read,
	.write			= mce_chrdev_write,
	.poll			= mce_chrdev_poll,
	.unlocked_ioctl		= mce_chrdev_ioctl,
	.llseek			= no_llseek,
};

static struct miscdevice mce_chrdev_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&mce_chrdev_ops,
};

/*
 * mce=off		Disables machine check
 * mce=no_cmci		Disables CMCI
 * mce=dont_log_ce	Clears corrected events silently, no log created for CEs.
 * mce=ignore_ce	Disables polling and CMCI, corrected events are not cleared.
 * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
 *	monarchtimeout is how long to wait for other CPUs on machine
 *	check, or 0 to not wait
 * mce=bootlog		Log MCEs from before booting. Disabled by default on AMD.
 * mce=nobootlog	Don't log MCEs from before booting.
 */
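
/*
 * Example (illustrative): booting with "mce=2,100" sets tolerant=2 and a
 * 100 us monarch timeout, while "mce=off" disables machine checks entirely.
 * The microsecond unit for the timeout follows the kernel's boot-option
 * documentation for this parameter.
 */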
static int __init mcheck_enable(char *str)
{
	if (*str == 0) {
		enable_p5_mce();
		return 1;
	}
	if (*str == '=')
		str++;
	if (!strcmp(str, "off"))
		mce_disabled = 1;
	else if (!strcmp(str, "no_cmci"))
		mce_cmci_disabled = 1;
	else if (!strcmp(str, "dont_log_ce"))
		mce_dont_log_ce = 1;
	else if (!strcmp(str, "ignore_ce"))
		mce_ignore_ce = 1;
	else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
		mce_bootlog = (str[0] == 'b');
	else if (isdigit(str[0])) {
		get_option(&str, &tolerant);
		if (*str == ',') {
			++str;
			get_option(&str, &monarch_timeout);
		}
	} else {
		pr_info("mce argument %s ignored. Please use /sys\n", str);
		return 0;
	}
	return 1;
}
__setup("mce", mcheck_enable);

int __init mcheck_init(void)
{
	mcheck_intel_therm_init();

	return 0;
}

/*
 * mce_syscore: PM support
 */

/*
 * Disable machine checks on suspend and shutdown. We can't really handle
 * them later.
 */
static int mce_disable_error_reporting(void)
{
	int i;

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
	return 0;
}

static int mce_syscore_suspend(void)
{
	return mce_disable_error_reporting();
}

static void mce_syscore_shutdown(void)
{
	mce_disable_error_reporting();
}

/*
 * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
 * Only one CPU is active at this time, the others get re-added later using
 * CPU hotplug:
 */
static void mce_syscore_resume(void)
{
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
}

static struct syscore_ops mce_syscore_ops = {
	.suspend	= mce_syscore_suspend,
	.shutdown	= mce_syscore_shutdown,
	.resume		= mce_syscore_resume,
};

/*
 * mce_device: Sysfs support
 */

static void mce_cpu_restart(void *data)
{
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	__mcheck_cpu_init_generic();
	__mcheck_cpu_init_timer();
}

/* Reinit machine checks after user configuration changes */
static void mce_restart(void)
{
	mce_timer_delete_all();
	on_each_cpu(mce_cpu_restart, NULL, 1);
}

/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	cmci_clear();
}

static void mce_enable_ce(void *all)
{
	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;
	cmci_reenable();
	cmci_recheck();

	/* A non-NULL "all" argument also re-arms the polling timer: */
	if (all)
		__mcheck_cpu_init_timer();
}
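
/*
 * The machinecheck subsystem: per-CPU devices appear under
 * /sys/devices/system/machinecheck/machinecheckN, each carrying the
 * attributes defined below.
 */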
static struct bus_type mce_subsys = {
	.name		= "machinecheck",
	.dev_name	= "machinecheck",
};

DEFINE_PER_CPU(struct device *, mce_device);

__cpuinitdata
void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
	return container_of(attr, struct mce_bank, attr);
}

static ssize_t show_bank(struct device *s, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
}

static ssize_t set_bank(struct device *s, struct device_attribute *attr,
			const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	attr_to_bank(attr)->ctl = new;
	mce_restart();

	return size;
}
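
/*
 * The "trigger" attribute names the user-space helper (mce_helper,
 * typically a path such as /sbin/mcelog) that is launched when an
 * event is logged.
 */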
static ssize_t
show_trigger(struct device *s, struct device_attribute *attr, char *buf)
{
	strcpy(buf, mce_helper);
	strcat(buf, "\n");
	return strlen(mce_helper) + 1;
}

static ssize_t set_trigger(struct device *s, struct device_attribute *attr,
			   const char *buf, size_t siz)
{
	char *p;

	strncpy(mce_helper, buf, sizeof(mce_helper));
	mce_helper[sizeof(mce_helper)-1] = 0;
	p = strchr(mce_helper, '\n');

	if (p)
		*p = 0;

	/* Account for a stripped trailing newline in the returned count: */
	return strlen(mce_helper) + !!p;
}

static ssize_t set_ignore_ce(struct device *s,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	if (mce_ignore_ce ^ !!new) {
		if (new) {
			/* disable ce features */
			mce_timer_delete_all();
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mce_ignore_ce = 1;
		} else {
			/* enable ce features */
			mce_ignore_ce = 0;
			on_each_cpu(mce_enable_ce, (void *)1, 1);
		}
	}
	return size;
}

static ssize_t set_cmci_disabled(struct device *s,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u64 new;

	if (strict_strtoull(buf, 0, &new) < 0)
		return -EINVAL;

	if (mce_cmci_disabled ^ !!new) {
		if (new) {
			/* disable cmci */
			on_each_cpu(mce_disable_cmci, NULL, 1);
			mce_cmci_disabled = 1;
		} else {
			/* enable cmci */
			mce_cmci_disabled = 0;
			on_each_cpu(mce_enable_ce, NULL, 1);
		}
	}
	return size;
}

static ssize_t store_int_with_restart(struct device *s,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	ssize_t ret = device_store_int(s, attr, buf, size);

	mce_restart();
	return ret;
}

static DEVICE_ATTR(trigger, 0644, show_trigger, set_trigger);
static DEVICE_INT_ATTR(tolerant, 0644, tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, monarch_timeout);
static DEVICE_INT_ATTR(dont_log_ce, 0644, mce_dont_log_ce);

static struct dev_ext_attribute dev_attr_check_interval = {
	__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
	&check_interval
};

static struct dev_ext_attribute dev_attr_ignore_ce = {
	__ATTR(ignore_ce, 0644, device_show_int, set_ignore_ce),
	&mce_ignore_ce
};

static struct dev_ext_attribute dev_attr_cmci_disabled = {
	__ATTR(cmci_disabled, 0644, device_show_int, set_cmci_disabled),
	&mce_cmci_disabled
};

static struct device_attribute *mce_device_attrs[] = {
	&dev_attr_tolerant.attr,
	&dev_attr_check_interval.attr,
	&dev_attr_trigger,
	&dev_attr_monarch_timeout.attr,
	&dev_attr_dont_log_ce.attr,
	&dev_attr_ignore_ce.attr,
	&dev_attr_cmci_disabled.attr,
	NULL
};
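
/*
 * Illustrative sysfs usage (assuming the subsystem registration below
 * succeeded):
 *
 *	# poll interval in seconds; store_int_with_restart() reinits timers
 *	echo 300 > /sys/devices/system/machinecheck/machinecheck0/check_interval
 *
 *	# mask off all errors in bank 2 (the control value is shared, see below)
 *	echo 0 > /sys/devices/system/machinecheck/machinecheck0/bank2
 */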

static cpumask_var_t mce_device_initialized;

static void mce_device_release(struct device *dev)
{
	kfree(dev);
}

/* Per cpu device init. All of the cpus still share the same ctrl bank: */
static __cpuinit int mce_device_create(unsigned int cpu)
{
	struct device *dev;
	int err;
	int i, j;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	dev = kzalloc(sizeof *dev, GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->id  = cpu;
	dev->bus = &mce_subsys;
	dev->release = &mce_device_release;

	err = device_register(dev);
	if (err)
		return err;

	for (i = 0; mce_device_attrs[i]; i++) {
		err = device_create_file(dev, mce_device_attrs[i]);
		if (err)
			goto error;
	}
	for (j = 0; j < banks; j++) {
		err = device_create_file(dev, &mce_banks[j].attr);
		if (err)
			goto error2;
	}
	cpumask_set_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = dev;

	return 0;
error2:
	while (--j >= 0)
		device_remove_file(dev, &mce_banks[j].attr);
error:
	while (--i >= 0)
		device_remove_file(dev, mce_device_attrs[i]);

	device_unregister(dev);

	return err;
}

static __cpuinit void mce_device_remove(unsigned int cpu)
{
	struct device *dev = per_cpu(mce_device, cpu);
	int i;

	if (!cpumask_test_cpu(cpu, mce_device_initialized))
		return;

	for (i = 0; mce_device_attrs[i]; i++)
		device_remove_file(dev, mce_device_attrs[i]);

	for (i = 0; i < banks; i++)
		device_remove_file(dev, &mce_banks[i].attr);

	device_unregister(dev);
	cpumask_clear_cpu(cpu, mce_device_initialized);
	per_cpu(mce_device, cpu) = NULL;
}

/* Make sure there are no machine checks on offlined CPUs. */
static void __cpuinit mce_disable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_clear();
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), 0);
	}
}

static void __cpuinit mce_reenable_cpu(void *h)
{
	unsigned long action = *(unsigned long *)h;
	int i;

	if (!mce_available(__this_cpu_ptr(&cpu_info)))
		return;

	if (!(action & CPU_TASKS_FROZEN))
		cmci_reenable();
	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];

		if (b->init)
			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
	}
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int __cpuinit
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct timer_list *t = &per_cpu(mce_timer, cpu);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		mce_device_create(cpu);
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (threshold_cpu_callback)
			threshold_cpu_callback(action, cpu);
		mce_device_remove(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		del_timer_sync(t);
		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!mce_ignore_ce && check_interval) {
			t->expires = round_jiffies(jiffies +
					per_cpu(mce_next_interval, cpu));
			add_timer_on(t, cpu);
		}
		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
		break;
	case CPU_POST_DEAD:
		/* intentionally ignoring frozen here */
		cmci_rediscover(cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block mce_cpu_notifier __cpuinitdata = {
	.notifier_call = mce_cpu_callback,
};
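
/*
 * Build the per-bank control attributes ("bank0" ... "bankN") that
 * mce_device_create() later attaches to every CPU's sysfs device.
 */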
static __init void mce_init_banks(void)
{
	int i;

	for (i = 0; i < banks; i++) {
		struct mce_bank *b = &mce_banks[i];
		struct device_attribute *a = &b->attr;

		sysfs_attr_init(&a->attr);
		a->attr.name	= b->attrname;
		snprintf(b->attrname, ATTR_LEN, "bank%d", i);

		a->attr.mode	= 0644;
		a->show		= show_bank;
		a->store	= set_bank;
	}
}

static __init int mcheck_init_device(void)
{
	int err;
	int i = 0;

	if (!mce_available(&boot_cpu_data))
		return -EIO;

	zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);

	mce_init_banks();

	err = subsys_system_register(&mce_subsys, NULL);
	if (err)
		return err;

	for_each_online_cpu(i) {
		err = mce_device_create(i);
		if (err)
			return err;
	}

	register_syscore_ops(&mce_syscore_ops);
	register_hotcpu_notifier(&mce_cpu_notifier);

	/* register character device /dev/mcelog */
	misc_register(&mce_chrdev_device);

	return err;
}
device_initcall(mcheck_init_device);

/*
 * Old style boot options parsing. Only for compatibility.
 */
static int __init mcheck_disable(char *str)
{
	mce_disabled = 1;
	return 1;
}
__setup("nomce", mcheck_disable);

#ifdef CONFIG_DEBUG_FS
struct dentry *mce_get_debugfs_dir(void)
{
	static struct dentry *dmce;

	if (!dmce)
		dmce = debugfs_create_dir("mce", NULL);

	return dmce;
}

static void mce_reset(void)
{
	cpu_missing = 0;
	atomic_set(&mce_fake_paniced, 0);
	atomic_set(&mce_executing, 0);
	atomic_set(&mce_callin, 0);
	atomic_set(&global_nwo, 0);
}

static int fake_panic_get(void *data, u64 *val)
{
	*val = fake_panic;
	return 0;
}

static int fake_panic_set(void *data, u64 val)
{
	mce_reset();
	fake_panic = val;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
			fake_panic_set, "%llu\n");
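
/*
 * "fake_panic" lives in debugfs under the "mce" directory created above.
 * When set, mce_panic() logs instead of really panicking, which is useful
 * for testing; setting it also resets the monarch/rendezvous state via
 * mce_reset().
 */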

static int __init mcheck_debugfs_init(void)
{
	struct dentry *dmce, *ffake_panic;

	dmce = mce_get_debugfs_dir();
	if (!dmce)
		return -ENOMEM;
	ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
					  &fake_panic_fops);
	if (!ffake_panic)
		return -ENOMEM;

	return 0;
}
late_initcall(mcheck_debugfs_init);
#endif