/*
 * File:	mca.c
 * Purpose:	Generic MCA handling layer
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Copyright (C) 2002 Dell Inc.
 * Copyright (C) Matt Domsch <Matt_Domsch@dell.com>
 *
 * Copyright (C) 2002 Intel
 * Copyright (C) Jenna Hall <jenna.s.hall@intel.com>
 *
 * Copyright (C) 2001 Intel
 * Copyright (C) Fred Lewis <frederick.v.lewis@intel.com>
 *
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
 *
 * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
 *
 * Copyright (C) 2006 FUJITSU LIMITED
 * Copyright (C) Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
 *
 * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
 *	      Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
 *	      added min save state dump, added INIT handler.
 *
 * 2001-01-03 Fred Lewis <frederick.v.lewis@intel.com>
 *	      Added setup of CMCI and CPEI IRQs, logging of corrected platform
 *	      errors, completed code for logging of corrected & uncorrected
 *	      machine check errors, and updated for conformance with Nov. 2000
 *	      revision of the SAL 3.0 spec.
 *
 * 2002-01-04 Jenna Hall <jenna.s.hall@intel.com>
 *	      Aligned MCA stack to 16 bytes, added platform vs. CPU error flag,
 *	      set SAL default return values, changed error record structure to
 *	      linked list, added init call to sal_get_state_info_size().
 *
 * 2002-03-25 Matt Domsch <Matt_Domsch@dell.com>
 *	      GUID cleanups.
 *
 * 2003-04-15 David Mosberger-Tang <davidm@hpl.hp.com>
 *	      Added INIT backtrace support.
 *
 * 2003-12-08 Keith Owens <kaos@sgi.com>
 *	      smp_call_function() must not be called from interrupt context
 *	      (can deadlock on tasklist_lock).
 *	      Use keventd to call smp_call_function().
 *
 * 2004-02-01 Keith Owens <kaos@sgi.com>
 *	      Avoid deadlock when using printk() for MCA and INIT records.
 *	      Delete all record printing code, moved to salinfo_decode in user
 *	      space.  Mark variables and functions static where possible.
 *	      Delete dead variables and functions.  Reorder to remove the need
 *	      for forward declarations and to consolidate related code.
 *
 * 2005-08-12 Keith Owens <kaos@sgi.com>
 *	      Convert MCA/INIT handlers to use per event stacks and SAL/OS
 *	      state.
 *
 * 2005-10-07 Keith Owens <kaos@sgi.com>
 *	      Add notify_die() hooks.
 *
 * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
 *	      Add printing support for MCA/INIT.
 *
 * 2007-04-27 Russ Anderson <rja@sgi.com>
 *	      Support multiple cpus going through OS_MCA in the same event.
 */
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/kexec.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/tlb.h>

#include "mca_drv.h"
#include "entry.h"
#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif

#define NOTIFY_INIT(event, regs, arg, spin)			\
do {								\
	if ((notify_die((event), "INIT", (regs), (arg), 0, 0)	\
			== NOTIFY_STOP) && ((spin) == 1))	\
		ia64_mca_spin(__func__);			\
} while (0)

#define NOTIFY_MCA(event, regs, arg, spin)			\
do {								\
	if ((notify_die((event), "MCA", (regs), (arg), 0, 0)	\
			== NOTIFY_STOP) && ((spin) == 1))	\
		ia64_mca_spin(__func__);			\
} while (0)
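
/*
 * Note (added, not in the original source): these wrappers consult the
 * die-notifier chain at each handler stage.  A call site later in this
 * file looks like
 *
 *	NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
 *
 * which expands to a notify_die() call; if a registered callback returns
 * NOTIFY_STOP and spinning was requested (spin == 1), the cpu parks
 * itself in ia64_mca_spin() instead of returning to SAL.
 */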

/* Used by mca_asm.S */
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
DEFINE_PER_CPU(u64, ia64_mca_tr_reload);   /* Flag for TR reload */

unsigned long __per_cpu_mca[NR_CPUS];

/* In mca_asm.S */
extern void			ia64_os_init_dispatch_monarch (void);
extern void			ia64_os_init_dispatch_slave (void);

static int monarch_cpu = -1;

static ia64_mc_info_t		ia64_mc_info;

#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
#define CPE_HISTORY_LENGTH    5
#define CMC_HISTORY_LENGTH    5

#ifdef CONFIG_ACPI
static struct timer_list cpe_poll_timer;
#endif
static struct timer_list cmc_poll_timer;
/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play w/ timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;

/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;

extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

static int mca_init __initdata;

/*
 * limited & delayed printing support for MCA/INIT handler
 */

#define mprintk(fmt...) ia64_mca_printk(fmt)

#define MLOGBUF_SIZE (512+256*NR_CPUS)
#define MLOGBUF_MSGMAX 256
static char mlogbuf[MLOGBUF_SIZE];
static DEFINE_SPINLOCK(mlogbuf_wlock);	/* mca context only */
static DEFINE_SPINLOCK(mlogbuf_rlock);	/* normal context only */
static unsigned long mlogbuf_start;
static unsigned long mlogbuf_end;
static unsigned int mlogbuf_finished = 0;
static unsigned long mlogbuf_timestamp = 0;

static int loglevel_save = -1;
#define BREAK_LOGLEVEL(__console_loglevel)		\
	oops_in_progress = 1;				\
	if (loglevel_save < 0)				\
		loglevel_save = __console_loglevel;	\
	__console_loglevel = 15;

#define RESTORE_LOGLEVEL(__console_loglevel)		\
	if (loglevel_save >= 0) {			\
		__console_loglevel = loglevel_save;	\
		loglevel_save = -1;			\
	}						\
	mlogbuf_finished = 0;				\
	oops_in_progress = 0;

/*
 * Push messages into buffer, print them later if not urgent.
 */
void ia64_mca_printk(const char *fmt, ...)
{
	va_list args;
	int printed_len;
	char temp_buf[MLOGBUF_MSGMAX];
	char *p;

	va_start(args, fmt);
	printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args);
	va_end(args);

	/* Copy the output into mlogbuf */
	if (oops_in_progress) {
		/* mlogbuf was abandoned, use printk directly instead. */
		/* print via "%s" so the message is never treated as a format string */
		printk("%s", temp_buf);
	} else {
		spin_lock(&mlogbuf_wlock);
		for (p = temp_buf; *p; p++) {
			unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
			if (next != mlogbuf_start) {
				mlogbuf[mlogbuf_end] = *p;
				mlogbuf_end = next;
			} else {
				/* buffer full */
				break;
			}
		}
		mlogbuf[mlogbuf_end] = '\0';
		spin_unlock(&mlogbuf_wlock);
	}
}
EXPORT_SYMBOL(ia64_mca_printk);
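
/*
 * Sketch (added, not in the original source) of the ring-buffer invariant
 * used above: mlogbuf_start == mlogbuf_end means "empty", and the writer
 * never advances mlogbuf_end onto mlogbuf_start, so one byte is always
 * sacrificed to distinguish full from empty.  E.g. with MLOGBUF_SIZE == 8,
 * start == 5 and end == 4, the next write would compute
 *
 *	next = (4 + 1) % 8 == 5 == mlogbuf_start
 *
 * and drop the rest of the message rather than overwrite unread data.
 */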

/*
 * Print buffered messages.
 *  NOTE: call this after returning normal context. (ex. from salinfod)
 */
void ia64_mlogbuf_dump(void)
{
	char temp_buf[MLOGBUF_MSGMAX];
	char *p;
	unsigned long index;
	unsigned long flags;
	unsigned int printed_len;

	/* Get output from mlogbuf */
	while (mlogbuf_start != mlogbuf_end) {
		temp_buf[0] = '\0';
		p = temp_buf;
		printed_len = 0;

		spin_lock_irqsave(&mlogbuf_rlock, flags);

		index = mlogbuf_start;
		while (index != mlogbuf_end) {
			*p = mlogbuf[index];
			index = (index + 1) % MLOGBUF_SIZE;
			if (!*p)
				break;
			p++;
			if (++printed_len >= MLOGBUF_MSGMAX - 1)
				break;
		}
		*p = '\0';
		if (temp_buf[0])
			printk("%s", temp_buf);	/* "%s" avoids format-string interpretation */
		mlogbuf_start = index;

		mlogbuf_timestamp = 0;
		spin_unlock_irqrestore(&mlogbuf_rlock, flags);
	}
}
EXPORT_SYMBOL(ia64_mlogbuf_dump);

/*
 * Call this if the system is going down, or if flushing messages to the
 * console immediately is required. (ex. recovery has failed, crash dump is
 * going to be invoked, long-wait rendezvous etc.)
 *  NOTE: this should be called from the monarch.
 */
static void ia64_mlogbuf_finish(int wait)
{
	BREAK_LOGLEVEL(console_loglevel);

	spin_lock_init(&mlogbuf_rlock);
	ia64_mlogbuf_dump();
	printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
	       "MCA/INIT might be dodgy or fail.\n");

	if (!wait)
		return;

	/* wait for console */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);

	mlogbuf_finished = 1;
}

/*
 * Print buffered messages from INIT context.
 */
static void ia64_mlogbuf_dump_from_init(void)
{
	if (mlogbuf_finished)
		return;

	if (mlogbuf_timestamp &&
	    time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
			"and the system seems to be messed up.\n");
		ia64_mlogbuf_finish(0);
		return;
	}

	if (!spin_trylock(&mlogbuf_rlock)) {
		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. "
			"Generated messages other than stack dump will be "
			"buffered to mlogbuf and will be printed later.\n");
		printk(KERN_ERR "INIT: If messages are not printed after "
			"this INIT, wait 30 seconds and assert INIT again.\n");
		if (!mlogbuf_timestamp)
			mlogbuf_timestamp = jiffies;
		return;
	}
	spin_unlock(&mlogbuf_rlock);
	ia64_mlogbuf_dump();
}

static inline void
ia64_mca_spin(const char *func)
{
	if (monarch_cpu == smp_processor_id())
		ia64_mlogbuf_finish(0);
	mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
	while (1)
		cpu_relax();
}

/*
 * IA64_MCA log support
 */
#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES	4	/* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
	spinlock_t	isl_lock;
	int		isl_index;
	unsigned long	isl_count;
	ia64_err_rec_t	*isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];

#define IA64_LOG_ALLOCATE(it, size) \
	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size); \
	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
#define IA64_LOG_INDEX_INC(it) \
    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
    ia64_state_log[it].isl_count++;}
#define IA64_LOG_INDEX_DEC(it) \
    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
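
/*
 * Sketch (added, not in the original source) of the double-buffer flip:
 * each log type keeps two buffers; isl_index names the "next" buffer and
 * 1 - isl_index the "current" one.  A record is read from SAL into NEXT,
 * then IA64_LOG_INDEX_INC() flips the roles:
 *
 *	isl_index == 0:  NEXT -> isl_log[0], CURR -> isl_log[1]
 *	IA64_LOG_INDEX_INC(it);
 *	isl_index == 1:  NEXT -> isl_log[1], CURR -> isl_log[0]
 *
 * so a nested MCA arriving while the current record is still being
 * consumed can be captured in the other buffer.
 */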

/*
 * ia64_log_init
 *	Reset the OS ia64 log buffer
 * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs	:	None
 */
static void __init
ia64_log_init(int sal_info_type)
{
	u64	max_size = 0;

	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
	IA64_LOG_LOCK_INIT(sal_info_type);

	// SAL will tell us the maximum size of any error record of this type
	max_size = ia64_sal_get_state_info_size(sal_info_type);
	if (!max_size)
		/* alloc_bootmem() doesn't like zero-sized allocations! */
		return;

	// set up OS data structures to hold error info
	IA64_LOG_ALLOCATE(sal_info_type, max_size);
	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}

/*
 * ia64_log_get
 *
 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *              irq_safe    whether you can use printk at this point
 *  Outputs :   size        (total record length)
 *              *buffer     (ptr to error record)
 *
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
	sal_log_record_header_t     *log_buffer;
	u64                         total_len = 0;
	unsigned long               s;

	IA64_LOG_LOCK(sal_info_type);

	/* Get the process state information */
	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

	if (total_len) {
		IA64_LOG_INDEX_INC(sal_info_type);
		IA64_LOG_UNLOCK(sal_info_type);
		if (irq_safe) {
			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
				       __func__, sal_info_type, total_len);
		}
		*buffer = (u8 *) log_buffer;
		return total_len;
	} else {
		IA64_LOG_UNLOCK(sal_info_type);
		return 0;
	}
}

/*
 *  ia64_mca_log_sal_error_record
 *
 *	This function retrieves a specified error record type from SAL
 *	and wakes up any processes waiting for error records.
 *
 *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE)
 *              FIXME: remove MCA and irq_safe.
 */
static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
	u8 *buffer;
	sal_log_record_header_t *rh;
	u64 size;
	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
#ifdef IA64_MCA_DEBUG_INFO
	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif

	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
	if (!size)
		return;

	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

	if (irq_safe)
		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
			smp_processor_id(),
			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

	/* Clear logs from corrected errors in case there's no user-level logger */
	rh = (sal_log_record_header_t *)buffer;
	if (rh->severity == sal_log_severity_corrected)
		ia64_sal_clear_state_info(sal_info_type);
}

/*
 * search_mca_table
 *  See if the MCA surfaced in an instruction range
 *  that has been tagged as recoverable.
 *
 *  Inputs
 *	first	First address range to check
 *	last	Last address range to check
 *	ip	Instruction pointer, address we are looking for
 *
 * Return value:
 *      1 on Success (in the table)/ 0 on Failure (not in the table)
 */
int
search_mca_table (const struct mca_table_entry *first,
		const struct mca_table_entry *last,
		unsigned long ip)
{
	const struct mca_table_entry *curr;
	u64 curr_start, curr_end;

	curr = first;
	while (curr <= last) {
		curr_start = (u64) &curr->start_addr + curr->start_addr;
		curr_end = (u64) &curr->end_addr + curr->end_addr;

		if ((ip >= curr_start) && (ip <= curr_end)) {
			return 1;
		}
		curr++;
	}
	return 0;
}

/* Given an address, look for it in the mca tables. */
int mca_recover_range(unsigned long addr)
{
	extern struct mca_table_entry __start___mca_table[];
	extern struct mca_table_entry __stop___mca_table[];

	return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
}
EXPORT_SYMBOL_GPL(mca_recover_range);
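
/*
 * Note (added, not in the original source): the table stores self-relative
 * offsets rather than absolute addresses, which is why the lookup above
 * computes "(u64) &curr->start_addr + curr->start_addr".  Sketch of the
 * encoding, assuming start_addr holds (target - &start_addr):
 *
 *	u64 target = (u64) &curr->start_addr + curr->start_addr;
 *
 * Self-relative entries stay valid wherever the table itself is mapped,
 * with no relocation fixups needed.
 */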

#ifdef CONFIG_ACPI

int cpe_vector = -1;
int ia64_cpe_irq = -1;

static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
{
	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cpe_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __func__, cpe_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
out:
	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);

	return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */
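
/*
 * Sketch (added, not in the original source) of the interrupt-storm
 * detector above: cpe_history[] is a ring of the jiffies timestamps of
 * the last CPE_HISTORY_LENGTH interrupts.  On each interrupt, count how
 * many of them (plus the current one) fall within the last HZ jiffies,
 * i.e. one second; if all CPE_HISTORY_LENGTH slots are that recent, the
 * rate is deemed too high and the handler switches to timer-driven
 * polling.  With CPE_HISTORY_LENGTH == 5 the threshold is five CPEs
 * within one second.
 */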

#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *  Register the corrected platform error vector with SAL.
 *
 *  Inputs
 *      cpev        Corrected Platform Error Vector number
 *
 *  Outputs
 *      None
 */
void
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __func__, cpev);
}
#endif /* CONFIG_ACPI */

/*
 * ia64_mca_cmc_vector_setup
 *
 *  Set up the corrected machine check vector register in the processor.
 *  (The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      None
 *
 * Outputs
 *	None
 */
void __cpuinit
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;        /* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
		       __func__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *  Mask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *  Unmask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
}

/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake up a particular cpu.
 *
 *  Inputs  :   cpuid
 *  Outputs :   None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_wakeup_all
 *
 *	Wakeup all the slave cpus which have rendez'ed previously.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Send a wakeup IPI to every cpu that checked in at rendezvous */
	for_each_online_cpu(cpu) {
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}
}

/*
 * ia64_mca_rendez_interrupt_handler
 *
 *	This is the handler used to put slave processors into a spinloop
 *	while the monarch processor does the mca handling, and later
 *	wake each slave up once the monarch is done.  The state
 *	IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendez'ed
 *	in SAL.  The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates
 *	the cpu has come out of OS rendezvous.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
{
	unsigned long flags;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = NULL, .monarch_cpu = &monarch_cpu };

	/* Mask all interrupts */
	local_irq_save(flags);

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);

	/* Wait for the monarch cpu to exit. */
	while (monarch_cpu != -1)
		cpu_relax();	/* spin until monarch leaves */

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
 *	arg		(Interrupt handler specific argument)
 *  Outputs :   None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg)
{
	return IRQ_HANDLED;
}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void*,struct ia64_sal_os_state*)
	= NULL;

int
ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);

static inline void
copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat)
{
	u64 fslot, tslot, nat;
	*tr = *fr;
	fslot = ((unsigned long)fr >> 3) & 63;
	tslot = ((unsigned long)tr >> 3) & 63;
	*tnat &= ~(1UL << tslot);
	nat = (fnat >> fslot) & 1;
	*tnat |= (nat << tslot);
}
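
/*
 * Worked example (added, not in the original source): each NaT collection
 * word carries one bit per 8-byte register slot, and a slot's bit index
 * comes from bits 3..8 of the register's address.  For a source register
 * at address 0x...e0d0:
 *
 *	fslot = (0xe0d0 >> 3) & 63 == 26
 *
 * so copy_reg() reads bit 26 of fnat, then deposits that bit at the
 * destination's own slot number in *tnat, keeping value and NaT state
 * paired as the register moves between save areas.
 */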

/* Change the comm field on the MCA/INIT task to include the pid that
 * was interrupted, it makes for easier debugging.  If that pid was 0
 * (swapper or nested MCA/INIT) then use the start of the previous comm
 * field suffixed with its cpu.
 */
static void
ia64_mca_modify_comm(const struct task_struct *previous_current)
{
	char *p, comm[sizeof(current->comm)];
	if (previous_current->pid)
		snprintf(comm, sizeof(comm), "%s %d",
			current->comm, previous_current->pid);
	else {
		int l;
		if ((p = strchr(previous_current->comm, ' ')))
			l = p - previous_current->comm;
		else
			l = strlen(previous_current->comm);
		snprintf(comm, sizeof(comm), "%s %*s %d",
			current->comm, l, previous_current->comm,
			task_thread_info(previous_current)->cpu);
	}
	memcpy(current->comm, comm, sizeof(current->comm));
}

static void
finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
		unsigned long *nat)
{
	const pal_min_state_area_t *ms = sos->pal_min_state;
	const u64 *bank;

	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
	 * pmsa_{xip,xpsr,xfs}
	 */
	if (ia64_psr(regs)->ic) {
		regs->cr_iip = ms->pmsa_iip;
		regs->cr_ipsr = ms->pmsa_ipsr;
		regs->cr_ifs = ms->pmsa_ifs;
	} else {
		regs->cr_iip = ms->pmsa_xip;
		regs->cr_ipsr = ms->pmsa_xpsr;
		regs->cr_ifs = ms->pmsa_xfs;

		sos->iip = ms->pmsa_iip;
		sos->ipsr = ms->pmsa_ipsr;
		sos->ifs = ms->pmsa_ifs;
	}
	regs->pr = ms->pmsa_pr;
	regs->b0 = ms->pmsa_br0;
	regs->ar_rsc = ms->pmsa_rsc;
	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
	if (ia64_psr(regs)->bn)
		bank = ms->pmsa_bank1_gr;
	else
		bank = ms->pmsa_bank0_gr;
	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
}

/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state, it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */
static struct task_struct *
ia64_mca_modify_original_stack(struct pt_regs *regs,
		const struct switch_stack *sw,
		struct ia64_sal_os_state *sos,
		const char *type)
{
	char *p;
	ia64_va va;
	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
	const pal_min_state_area_t *ms = sos->pal_min_state;
	struct task_struct *previous_current;
	struct pt_regs *old_regs;
	struct switch_stack *old_sw;
	unsigned size = sizeof(struct pt_regs) +
		sizeof(struct switch_stack) + 16;
	unsigned long *old_bspstore, *old_bsp;
	unsigned long *new_bspstore, *new_bsp;
	unsigned long old_unat, old_rnat, new_rnat, nat;
	u64 slots, loadrs = regs->loadrs;
	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
	u64 ar_bspstore = regs->ar_bspstore;
	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
	const char *msg;
	int cpu = smp_processor_id();

	previous_current = curr_task(cpu);
	set_curr_task(cpu, current);
	if ((p = strchr(current->comm, ' ')))
		*p = '\0';

	/* Best effort attempt to cope with MCA/INIT delivered while in
	 * physical mode.
	 */
	regs->cr_ipsr = ms->pmsa_ipsr;
	if (ia64_psr(regs)->dt == 0) {
		va.l = r12;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r12 = va.l;
		}
		va.l = r13;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r13 = va.l;
		}
	}
	if (ia64_psr(regs)->rt == 0) {
		va.l = ar_bspstore;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bspstore = va.l;
		}
		va.l = ar_bsp;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bsp = va.l;
		}
	}

	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
	 * have been copied to the old stack, the old stack may fail the
	 * validation tests below.  So ia64_old_stack() must restore the dirty
	 * registers from the new stack.  The old and new bspstore probably
	 * have different alignments, so loadrs calculated on the old bsp
	 * cannot be used to restore from the new bsp.  Calculate a suitable
	 * loadrs for the new stack and save it in the new pt_regs, where
	 * ia64_old_stack() can get it.
	 */
	old_bspstore = (unsigned long *)ar_bspstore;
	old_bsp = (unsigned long *)ar_bsp;
	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
	new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET);
	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
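	/*
	 * Note (added, not in the original source): the loadrs field of
	 * ar.rsc sits at bits 29:16 and is expressed in bytes, which is why
	 * the byte count "(new_bsp - new_bspstore) * 8" is shifted left by
	 * 16.  E.g. 24 slots between bspstore and bsp -> 192 bytes ->
	 * loadrs field value 192 << 16.
	 */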

	/* Verify the previous stack state before we change it */
	if (user_mode(regs)) {
		msg = "occurred in user space";
		/* previous_current is guaranteed to be valid when the task was
		 * in user space, so ...
		 */
		ia64_mca_modify_comm(previous_current);
		goto no_mod;
	}

	if (r13 != sos->prev_IA64_KR_CURRENT) {
		msg = "inconsistent previous current and r13";
		goto no_mod;
	}

	if (!mca_recover_range(ms->pmsa_iip)) {
		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent r12 and r13";
			goto no_mod;
		}
		if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bspstore and r13";
			goto no_mod;
		}
		va.p = old_bspstore;
		if (va.f.reg < 5) {
			msg = "old_bspstore is in the wrong region";
			goto no_mod;
		}
		if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bsp and r13";
			goto no_mod;
		}
		size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
		if (ar_bspstore + size > r12) {
			msg = "no room for blocked state";
			goto no_mod;
		}
	}

	ia64_mca_modify_comm(previous_current);

	/* Make the original task look blocked.  First stack a struct pt_regs,
	 * describing the state at the time of interrupt.  mca_asm.S built a
	 * partial pt_regs, copy it and fill in the blanks using minstate.
	 */
	p = (char *)r12 - sizeof(*regs);
	old_regs = (struct pt_regs *)p;
	memcpy(old_regs, regs, sizeof(*regs));
	old_regs->loadrs = loadrs;
	old_unat = old_regs->ar_unat;
	finish_pt_regs(old_regs, sos, &old_unat);

	/* Next stack a struct switch_stack.  mca_asm.S built a partial
	 * switch_stack, copy it and fill in the blanks using pt_regs and
	 * minstate.
	 *
	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
	 * ar.pfs is set to 0.
	 *
	 * unwind.c::unw_unwind() does special processing for interrupt frames.
	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
	 * switch_stack on the original stack so it will unwind correctly when
	 * unwind.c reads pt_regs.
	 *
	 * thread.ksp is updated to point to the synthesized switch_stack.
	 */
	p -= sizeof(struct switch_stack);
	old_sw = (struct switch_stack *)p;
	memcpy(old_sw, sw, sizeof(*sw));
	old_sw->caller_unat = old_unat;
	old_sw->ar_fpsr = old_regs->ar_fpsr;
	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
	old_sw->b0 = (u64)ia64_leave_kernel;
	old_sw->b1 = ms->pmsa_br1;
	old_sw->ar_pfs = 0;
	old_sw->ar_unat = old_unat;
	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
	previous_current->thread.ksp = (u64)p - 16;

	/* Finally copy the original stack's registers back to its RBS.
	 * Registers from ar.bspstore through ar.bsp at the time of the event
	 * are in the current RBS, copy them back to the original stack.  The
	 * copy must be done register by register because the original bspstore
	 * and the current one have different alignments, so the saved RNAT
	 * data occurs at different places.
	 *
	 * mca_asm does cover, so the old_bsp already includes all registers at
	 * the time of MCA/INIT.  It also does flushrs, so all registers before
	 * this function have been written to backing store on the MCA/INIT
	 * stack.
	 */
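	/*
	 * Sketch (added, not in the original source) of why the copy is done
	 * word by word: the RSE stores an RNAT collection word whenever bits
	 * 3..8 of the backing-store address are all ones, i.e. once per
	 * 63 register slots.  Two backing stores with different starting
	 * alignments therefore interleave their RNAT slots at different
	 * points in the register stream, so each register's NaT bit has to
	 * be picked out of the new stack's collection word and re-deposited
	 * at the old stack's own slot position, as the loop below does.
	 */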
	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
	old_rnat = regs->ar_rnat;
	while (slots--) {
		if (ia64_rse_is_rnat_slot(new_bspstore)) {
			new_rnat = ia64_get_rnat(new_bspstore++);
		}
		if (ia64_rse_is_rnat_slot(old_bspstore)) {
			*old_bspstore++ = old_rnat;
			old_rnat = 0;
		}
		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
		*old_bspstore++ = *new_bspstore++;
	}
	old_sw->ar_bspstore = (unsigned long)old_bspstore;
	old_sw->ar_rnat = old_rnat;

	sos->prev_task = previous_current;
	return previous_current;

no_mod:
	mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
			smp_processor_id(), type, msg);
	old_unat = regs->ar_unat;
	finish_pt_regs(regs, sos, &old_unat);
	return previous_current;
}

/* The monarch/slave interaction is based on monarch_cpu and requires that all
 * slaves have entered rendezvous before the monarch leaves.  If any cpu has
 * not entered rendezvous yet then wait a bit.  The assumption is that any
 * slave that has not rendezvoused after a reasonable time is never going to do
 * so.  In this context, slave includes cpus that respond to the MCA rendezvous
 * interrupt, as well as cpus that receive the INIT slave event.
 */
static void
ia64_wait_for_slaves(int monarch, const char *type)
{
	int c, i, wait;

	/*
	 * wait 5 seconds total for slaves (arbitrary)
	 */
	for (i = 0; i < 5000; i++) {
		wait = 0;
		for_each_online_cpu(c) {
			if (c == monarch)
				continue;
			if (ia64_mc_info.imi_rendez_checkin[c]
					== IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
				udelay(1000);		/* short wait */
				wait = 1;
				break;
			}
		}
		if (!wait)
			goto all_in;
	}

	/*
	 * Maybe slave(s) dead.  Print buffered messages immediately.
	 */
	ia64_mlogbuf_finish(0);
	mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
			mprintk(" %d", c);
	}
	mprintk("\n");
	return;

all_in:
	mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
	return;
}

/* mca_insert_tr
 *
 *	Switch the region id when reloading a TR, if needed.
 *	iord: 1: itr, 2: dtr
 *
 */
static void mca_insert_tr(u64 iord)
{

	int i;
	u64 old_rr;
	struct ia64_tr_entry *p;
	unsigned long psr;
	int cpu = smp_processor_id();

	if (!ia64_idtrs[cpu])
		return;

	psr = ia64_clear_ic();
	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
		p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
		if (p->pte & 0x1) {
			old_rr = ia64_get_rr(p->ifa);
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, p->rr);
				ia64_srlz_d();
			}
			ia64_ptr(iord, p->ifa, p->itir >> 2);
			ia64_srlz_i();
			if (iord & 0x1) {
				ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (iord & 0x2) {
				ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, old_rr);
				ia64_srlz_d();
			}
		}
	}
	ia64_set_psr(psr);
}
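
/*
 * Note (added, not in the original source): translation insertions are
 * tagged with the region id currently programmed in the matching region
 * register, so if a saved entry was inserted under a different rid the
 * loop above temporarily programs p->rr, purges and re-inserts the
 * translation, then restores the previous rid, with ia64_srlz_d()/
 * ia64_srlz_i() serializing each register and TLB update.
 */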

/*
 * ia64_mca_handler
 *
 *	This is the uncorrectable machine check handler called from the OS_MCA
 *	dispatch code which is in turn called from SAL_CHECK().
 *	This is the place where the core of OS MCA handling is done.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.  This handler code is supposed to be run only on the
 *	monarch processor.  Once the monarch is done with MCA handling
 *	further MCA logging is enabled by clearing logs.
 *	Monarch also has the duty of sending wakeup-IPIs to pull the
 *	slave processors out of rendezvous spinloop.
 *
 *	If multiple processors call into OS_MCA, the first will become
 *	the monarch.  Subsequent cpus will be recorded in the mca_cpu
 *	bitmask.  After the first monarch has processed its MCA, it
 *	will wake up the next cpu in the mca_cpu bitmask and then go
 *	into the rendezvous loop.  When all processors have serviced
 *	their MCA, the last monarch frees up the rest of the processors.
 */
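
/*
 * Sketch (added, not in the original source) of the monarch election
 * described above: an atomic counter picks the first arrival,
 *
 *	if (atomic_add_return(1, &mca_count) == 1) {
 *		monarch_cpu = cpu;		// first in: monarch
 *	} else {
 *		cpu_set(cpu, mca_cpu);		// later arrivals queue up
 *	}
 *
 * and on exit atomic_dec_return() tells the departing monarch whether a
 * queued cpu still needs to be promoted before the slaves are released.
 */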
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 struct ia64_sal_os_state *sos)
{
	int recover, cpu = smp_processor_id();
	struct task_struct *previous_current;
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
	static atomic_t mca_count;
	static cpumask_t mca_cpu;

	if (atomic_add_return(1, &mca_count) == 1) {
		monarch_cpu = cpu;
		sos->monarch = 1;
	} else {
		cpu_set(cpu, mca_cpu);
		sos->monarch = 0;
	}
	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");

	NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
	if (sos->monarch) {
		ia64_wait_for_slaves(cpu, "MCA");

		/* Wakeup all the processors which are spinning in the
		 * rendezvous loop.  They will leave SAL, then spin in the OS
		 * with interrupts disabled until this monarch cpu leaves the
		 * MCA handler.  That gets control back to the OS so we can
		 * backtrace the other cpus, backtrace when spinning in SAL
		 * does not work.
		 */
		ia64_mca_wakeup_all();
	} else {
		while (cpu_isset(cpu, mca_cpu))
			cpu_relax();	/* spin until monarch wakes us */
	}

	NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* MCA error recovery */
	recover = (ia64_mca_ucmc_extension
		&& ia64_mca_ucmc_extension(
			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
			sos));

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
		sos->os_status = IA64_MCA_CORRECTED;
	} else {
		/* Dump buffered message to console */
		ia64_mlogbuf_finish(1);
	}

	if (__get_cpu_var(ia64_mca_tr_reload)) {
		mca_insert_tr(0x1); /*Reload dynamic itrs*/
		mca_insert_tr(0x2); /*Reload dynamic dtrs*/
	}

	NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);

	if (atomic_dec_return(&mca_count) > 0) {
		int i;

		/* wake up the next monarch cpu,
		 * and put this cpu in the rendez loop.
		 */
		for_each_online_cpu(i) {
			if (cpu_isset(i, mca_cpu)) {
				monarch_cpu = i;
				cpu_clear(i, mca_cpu);	/* wake next cpu */
				while (monarch_cpu != -1)
					cpu_relax();	/* spin until last cpu leaves */
				set_curr_task(cpu, previous_current);
				ia64_mc_info.imi_rendez_checkin[cpu]
						= IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
				return;
			}
		}
	}

	set_curr_task(cpu, previous_current);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	monarch_cpu = -1;	/* This frees the slaves and previous monarchs */
}

static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);

/*
 * ia64_mca_cmc_int_handler
 *
 *  This is the corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __func__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
			/* If we're being hit with CMC interrupts, we won't
			 * ever execute the schedule_work() below.  Need to
			 * disable CMC interrupts on this processor now.
			 */
			ia64_mca_cmc_vector_disable(NULL);
			schedule_work(&cmc_disable_work);

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
out:
	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);

	return IRQ_HANDLED;
}

/*
 * ia64_mca_cmc_int_caller
 *
 * Triggered by a sw interrupt from the CMC polling routine.  Calls
 * the real interrupt handler and either triggers a sw interrupt
 * on the next cpu or does cleanup at the end.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 * Outputs
 *      handled
 */
static irqreturn_t
ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
{
        static int start_count = -1;
        unsigned int cpuid;

        cpuid = smp_processor_id();

        /* If first cpu, update count */
        if (start_count == -1)
                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

        ia64_mca_cmc_int_handler(cmc_irq, arg);
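
        /*
         * Pass the poll on to the next online cpu, so the interrupt
         * cascades through every online cpu in turn.  The last cpu in
         * the chain (no further online cpu found) decides below whether
         * to keep polling or return to interrupt-driven mode.
         */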
        /* cpumask_next() returns the first online cpu strictly greater
         * than its first argument, so pass cpuid rather than cpuid+1
         * to avoid skipping a cpu in the cascade.
         */
        cpuid = cpumask_next(cpuid, cpu_online_mask);

        if (cpuid < nr_cpu_ids) {
                platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
        } else {
                /* If no log record, switch out of polling mode */
                if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

                        printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
                        schedule_work(&cmc_enable_work);
                        cmc_polling_enabled = 0;

                } else {

                        mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
                }

                start_count = -1;
        }

        return IRQ_HANDLED;
}

/*
 * ia64_mca_cmc_poll
 *
 * Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs  : dummy (unused)
 * Outputs : None
 */
static void
ia64_mca_cmc_poll (unsigned long dummy)
{
        /* Trigger a CMC interrupt cascade */
        platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_cpe_int_caller
 *
 * Triggered by a sw interrupt from the CPE polling routine.  Calls
 * the real interrupt handler and either triggers a sw interrupt
 * on the next cpu or does cleanup at the end.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 * Outputs
 *      handled
 */
#ifdef CONFIG_ACPI

static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
{
        static int start_count = -1;
        static int poll_time = MIN_CPE_POLL_INTERVAL;
        unsigned int cpuid;

        cpuid = smp_processor_id();

        /* If first cpu, update count */
        if (start_count == -1)
                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

        ia64_mca_cpe_int_handler(cpe_irq, arg);
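
        /*
         * As with the CMC cascade, forward the poll to the next online
         * cpu.  The last cpu in the chain adapts the polling interval:
         * it is halved (down to MIN_CPE_POLL_INTERVAL) while new records
         * keep arriving and doubled (up to MAX_CPE_POLL_INTERVAL) while
         * the logs stay quiet.
         */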
        /* see ia64_mca_cmc_int_caller: pass cpuid, not cpuid+1, because
         * cpumask_next() already returns a cpu greater than its argument.
         */
        cpuid = cpumask_next(cpuid, cpu_online_mask);

        if (cpuid < nr_cpu_ids) {
                platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
        } else {
                /*
                 * If a log was recorded, increase our polling frequency,
                 * otherwise, backoff or return to interrupt mode.
                 */
                if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
                        poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
                } else if (cpe_vector < 0) {
                        poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
                } else {
                        poll_time = MIN_CPE_POLL_INTERVAL;

                        printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
                        enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
                        cpe_poll_enabled = 0;
                }

                if (cpe_poll_enabled)
                        mod_timer(&cpe_poll_timer, jiffies + poll_time);
                start_count = -1;
        }

        return IRQ_HANDLED;
}

/*
 * ia64_mca_cpe_poll
 *
 * Poll for Corrected Platform Errors (CPEs); trigger an interrupt on
 * the first cpu, from there it will trickle through all the cpus.
 *
 * Inputs  : dummy (unused)
 * Outputs : None
 */
static void
ia64_mca_cpe_poll (unsigned long dummy)
{
        /* Trigger a CPE interrupt cascade */
        platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

#endif /* CONFIG_ACPI */

static int
default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
{
        int c;
        struct task_struct *g, *t;

        if (val != DIE_INIT_MONARCH_PROCESS)
                return NOTIFY_DONE;
#ifdef CONFIG_KEXEC
        if (atomic_read(&kdump_in_progress))
                return NOTIFY_DONE;
#endif

        /*
         * FIXME: mlogbuf will brim over with INIT stack dumps.
         * To enable show_stack from INIT we abuse oops_in_progress, which
         * is really meant for genuine oopses; this may cause incorrect
         * behaviour after INIT returns.
         */
        BREAK_LOGLEVEL(console_loglevel);
        ia64_mlogbuf_dump_from_init();

        printk(KERN_ERR "Processes interrupted by INIT -");
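
        /*
         * The task each cpu was running when INIT arrived was saved by
         * ia64_mca_modify_original_stack() in the SAL/OS state area at
         * the top of that cpu's per-cpu INIT stack; recover it from
         * there so the interrupted pids can be listed.
         */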
        for_each_online_cpu(c) {
                struct ia64_sal_os_state *s;
                t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
                s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
                g = s->prev_task;
                if (g) {
                        if (g->pid)
                                printk(" %d", g->pid);
                        else
                                printk(" %d (cpu %d task 0x%p)", g->pid,
                                       task_cpu(g), g);
                }
        }
        printk("\n\n");
        if (read_trylock(&tasklist_lock)) {
                do_each_thread (g, t) {
                        printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
                        show_stack(t, NULL);
                } while_each_thread (g, t);
                read_unlock(&tasklist_lock);
        }
        /* FIXME: This will not restore zapped printk locks. */
        RESTORE_LOGLEVEL(console_loglevel);
        return NOTIFY_DONE;
}

/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_os_init_dispatch
 *
 * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state
 * for this event.  This code is used for both monarch and slave INIT events,
 * see sos->monarch.
 *
 * All INIT events switch to the INIT stack and change the previous process to
 * blocked status.  If one of the INIT events is the monarch then we are
 * probably processing the nmi button/command.  Use the monarch cpu to dump
 * all the processes.  The slave INIT events all spin until the monarch cpu
 * returns.  We can also get INIT slave events for MCA, in which case the MCA
 * process is the monarch.
 */
void
ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
                  struct ia64_sal_os_state *sos)
{
        static atomic_t slaves;
        static atomic_t monarchs;
        struct task_struct *previous_current;
        int cpu = smp_processor_id();
        struct ia64_mca_notify_die nd =
                { .sos = sos, .monarch_cpu = &monarch_cpu };

        NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);

        mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
                sos->proc_state_param, cpu, sos->monarch);
        salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);

        previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
        sos->os_status = IA64_INIT_RESUME;

        /* FIXME: Workaround for broken proms that drive all INIT events as
         * slaves.  The last slave that enters is promoted to be a monarch.
         * Remove this code in September 2006; that gives platforms a year to
         * fix their proms and get their customers updated.
         */
        if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
                mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
                        __func__, cpu);
                atomic_dec(&slaves);
                sos->monarch = 1;
        }

        /* FIXME: Workaround for broken proms that drive all INIT events as
         * monarchs.  Second and subsequent monarchs are demoted to slaves.
         * Remove this code in September 2006; that gives platforms a year to
         * fix their proms and get their customers updated.
         */
        if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
                mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
                        __func__, cpu);
                atomic_dec(&monarchs);
                sos->monarch = 0;
        }

        if (!sos->monarch) {
                ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
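
                /*
                 * Slave path: check in, spin until a monarch announces
                 * itself, run the slave notifier chain, then spin again
                 * until the monarch finishes and clears monarch_cpu.
                 * Under kexec the spins also poll kdump_in_progress so
                 * that a crash dump is not held up by a stuck monarch.
                 */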
#ifdef CONFIG_KEXEC
                while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress))
                        udelay(1000);
#else
                while (monarch_cpu == -1)
                        cpu_relax(); /* spin until monarch enters */
#endif

                NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
                NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);

#ifdef CONFIG_KEXEC
                while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress))
                        udelay(1000);
#else
                while (monarch_cpu != -1)
                        cpu_relax(); /* spin until monarch leaves */
#endif

                NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);

                mprintk("Slave on cpu %d returning to normal service.\n", cpu);
                set_curr_task(cpu, previous_current);
                ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
                atomic_dec(&slaves);
                return;
        }

        monarch_cpu = cpu;
        NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);

        /*
         * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000),
         * INIT can be generated via the BMC's command-line interface, but
         * since the console is on the same serial line, the user will need
         * some time to switch out of the BMC before the dump begins.
         */
        mprintk("Delaying for 5 seconds...\n");
        udelay(5*1000000);
        ia64_wait_for_slaves(cpu, "INIT");
        /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
         * to default_monarch_init_process() above and just print all the
         * tasks.
         */
        NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
        NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);

        mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
        atomic_dec(&monarchs);
        set_curr_task(cpu, previous_current);
        monarch_cpu = -1;
        return;
}

static int __init
ia64_mca_disable_cpe_polling(char *str)
{
        cpe_poll_enabled = 0;
        return 1;
}

__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);

static struct irqaction cmci_irqaction = {
        .handler = ia64_mca_cmc_int_handler,
        .flags =   IRQF_DISABLED,
        .name =    "cmc_hndlr"
};

static struct irqaction cmcp_irqaction = {
        .handler = ia64_mca_cmc_int_caller,
        .flags =   IRQF_DISABLED,
        .name =    "cmc_poll"
};

static struct irqaction mca_rdzv_irqaction = {
        .handler = ia64_mca_rendez_int_handler,
        .flags =   IRQF_DISABLED,
        .name =    "mca_rdzv"
};

static struct irqaction mca_wkup_irqaction = {
        .handler = ia64_mca_wakeup_int_handler,
        .flags =   IRQF_DISABLED,
        .name =    "mca_wkup"
};

#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
        .handler = ia64_mca_cpe_int_handler,
        .flags =   IRQF_DISABLED,
        .name =    "cpe_hndlr"
};

static struct irqaction mca_cpep_irqaction = {
        .handler = ia64_mca_cpe_int_caller,
        .flags =   IRQF_DISABLED,
        .name =    "cpe_poll"
};
#endif /* CONFIG_ACPI */

/* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
 * these stacks can never sleep, cannot return from the kernel to user space,
 * and do not appear in a normal ps listing, so there is no need to format
 * most of the fields.
 */
static void __cpuinit
format_mca_init_stack(void *mca_data, unsigned long offset,
                const char *type, int cpu)
{
        struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
        struct thread_info *ti;

        memset(p, 0, KERNEL_STACK_SIZE);
        ti = task_thread_info(p);
        ti->flags = _TIF_MCA_INIT;
        ti->preempt_count = 1;
        ti->task = p;
        ti->cpu = cpu;
        p->stack = ti;
        p->state = TASK_UNINTERRUPTIBLE;
        cpu_set(cpu, p->cpus_allowed);
        INIT_LIST_HEAD(&p->tasks);
        p->parent = p->real_parent = p->group_leader = p;
        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
        strncpy(p->comm, type, sizeof(p->comm)-1);
}

/* Caller prevents this from being called after init */
static void * __init_refok mca_bootmem(void)
{
        return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
                               KERNEL_STACK_SIZE, 0);
}

/* Do per-CPU MCA-related initialization.  */
void __cpuinit
ia64_mca_cpu_init(void *cpu_data)
{
        void *pal_vaddr;
        void *data;
        long sz = sizeof(struct ia64_mca_cpu);
        int cpu = smp_processor_id();
        static int first_time = 1;

        /*
         * Structure will already be allocated if cpu has been online,
         * then offlined.
         */
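        /*
         * The first caller is the boot cpu, which runs before the normal
         * page allocator is available, hence the bootmem allocation; cpus
         * brought up later (including hotplug) use __get_free_pages().
         */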
        if (__per_cpu_mca[cpu]) {
                data = __va(__per_cpu_mca[cpu]);
        } else {
                if (first_time) {
                        data = mca_bootmem();
                        first_time = 0;
                } else
                        data = (void *)__get_free_pages(GFP_KERNEL,
                                                        get_order(sz));
                if (!data)
                        panic("Could not allocate MCA memory for cpu %d\n",
                              cpu);
        }
        format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
                "MCA", cpu);
        format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
                "INIT", cpu);
        __get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);

        /*
         * Stash away a copy of the PTE needed to map the per-CPU page.
         * We may need it during MCA recovery.
         */
        __get_cpu_var(ia64_mca_per_cpu_pte) =
                pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));

        /*
         * Also, stash away a copy of the PAL address and the PTE
         * needed to map it.
         */
        pal_vaddr = efi_get_pal_addr();
        if (!pal_vaddr)
                return;
        __get_cpu_var(ia64_mca_pal_base) =
                GRANULEROUNDDOWN((unsigned long) pal_vaddr);
        __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
                                                              PAGE_KERNEL));
}

static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
{
        unsigned long flags;

        local_irq_save(flags);
        if (!cmc_polling_enabled)
                ia64_mca_cmc_vector_enable(NULL);
        local_irq_restore(flags);
}

static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
                                      unsigned long action,
                                      void *hcpu)
{
        int hotcpu = (unsigned long) hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
                                         NULL, 0);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block mca_cpu_notifier __cpuinitdata = {
        .notifier_call = mca_cpu_callback
};

/*
 * ia64_mca_init
 *
 * Do all the system level mca specific initialization.
 *
 *      1. Register spinloop and wakeup request interrupt vectors
 *
 *      2. Register OS_MCA handler entry point
 *
 *      3. Register OS_INIT handler entry point
 *
 *      4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 *
 * Note that this initialization is done very early before some kernel
 * services are available.
 *
 * Inputs  : None
 *
 * Outputs : None
 */
void __init
ia64_mca_init(void)
{
        ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
        ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
        ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
        int i;
        long rc;
        struct ia64_sal_retval isrv;
        unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
        static struct notifier_block default_init_monarch_nb = {
                .notifier_call = default_monarch_init_process,
                .priority = 0 /* we need to be notified last */
        };

        IA64_MCA_DEBUG("%s: begin\n", __func__);

        /* Clear the Rendez checkin flag for all cpus */
        for (i = 0; i < NR_CPUS; i++)
                ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

        /*
         * Register the rendezvous spinloop and wakeup mechanism with SAL
         */

        /* Register the rendezvous interrupt vector with SAL */
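        /*
         * SAL may refuse the requested rendezvous timeout and return -2
         * with a suggested minimum in isrv.v0; in that case adopt the
         * suggested value, announce it via the die notifier, and retry.
         * Any other non-zero status is a hard failure.
         */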
        while (1) {
                isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
                                              SAL_MC_PARAM_MECHANISM_INT,
                                              IA64_MCA_RENDEZ_VECTOR,
                                              timeout,
                                              SAL_MC_PARAM_RZ_ALWAYS);
                rc = isrv.status;
                if (rc == 0)
                        break;
                if (rc == -2) {
                        printk(KERN_INFO "Increasing MCA rendezvous timeout from "
                               "%ld to %ld milliseconds\n", timeout, isrv.v0);
                        timeout = isrv.v0;
                        NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
                        continue;
                }
                printk(KERN_ERR "Failed to register rendezvous interrupt "
                       "with SAL (status %ld)\n", rc);
                return;
        }

        /* Register the wakeup interrupt vector with SAL */
        isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
                                      SAL_MC_PARAM_MECHANISM_INT,
                                      IA64_MCA_WAKEUP_VECTOR,
                                      0, 0);
        rc = isrv.status;
        if (rc) {
                printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
                       "(status %ld)\n", rc);
                return;
        }

        IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);

        ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
        /*
         * XXX - disable SAL checksum by setting size to 0; should be
         * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
         */
        ia64_mc_info.imi_mca_handler_size = 0;

        /* Register the os mca handler with SAL */
        if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
                                       ia64_mc_info.imi_mca_handler,
                                       ia64_tpa(mca_hldlr_ptr->gp),
                                       ia64_mc_info.imi_mca_handler_size,
                                       0, 0, 0)))
        {
                printk(KERN_ERR "Failed to register OS MCA handler with SAL "
                       "(status %ld)\n", rc);
                return;
        }

        IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
                       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));

        /*
         * XXX - disable SAL checksum by setting size to 0; should be
         * size of the actual init handler in mca_asm.S.
         */
        ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp);
        ia64_mc_info.imi_monarch_init_handler_size = 0;
        ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
        ia64_mc_info.imi_slave_init_handler_size = 0;

        IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
                       ia64_mc_info.imi_monarch_init_handler);

        /* Register the os init handler with SAL */
        if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
                                       ia64_mc_info.imi_monarch_init_handler,
                                       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
                                       ia64_mc_info.imi_monarch_init_handler_size,
                                       ia64_mc_info.imi_slave_init_handler,
                                       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
                                       ia64_mc_info.imi_slave_init_handler_size)))
        {
                printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
                       "(status %ld)\n", rc);
                return;
        }
        if (register_die_notifier(&default_init_monarch_nb)) {
                printk(KERN_ERR "Failed to register default monarch INIT process\n");
                return;
        }

        IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);

        /*
         * Configure the CMCI/P vector and handler.  Interrupts for CMC are
         * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
         */
        register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
        register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
        ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */

        /* Setup the MCA rendezvous interrupt vector */
        register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);

        /* Setup the MCA wakeup interrupt vector */
        register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);

#ifdef CONFIG_ACPI
        /* Setup the CPEI/P handler */
        register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif

        /* Initialize the areas set aside by the OS to buffer the
         * platform/processor error states for MCA/INIT/CMC
         * handling.
         */
        ia64_log_init(SAL_INFO_TYPE_MCA);
        ia64_log_init(SAL_INFO_TYPE_INIT);
        ia64_log_init(SAL_INFO_TYPE_CMC);
        ia64_log_init(SAL_INFO_TYPE_CPE);

        mca_init = 1;
        printk(KERN_INFO "MCA related initialization done\n");
}

/*
 * ia64_mca_late_init
 *
 * Opportunity to setup things that require initialization later
 * than ia64_mca_init.  Setup a timer to poll for CPEs if the
 * platform doesn't support an interrupt driven mechanism.
 *
 * Inputs  : None
 * Outputs : Status
 */
static int __init
ia64_mca_late_init(void)
{
        if (!mca_init)
                return 0;

        register_hotcpu_notifier(&mca_cpu_notifier);

        /* Setup the CMCI/P vector and handler */
        init_timer(&cmc_poll_timer);
        cmc_poll_timer.function = ia64_mca_cmc_poll;

        /* Unmask/enable the vector */
        cmc_polling_enabled = 0;
        schedule_work(&cmc_enable_work);

        IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);

#ifdef CONFIG_ACPI
        /* Setup the CPEI/P vector and handler */
        cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
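
        /*
         * acpi_request_vector() returns a negative value when the platform
         * does not advertise a CPEI vector; in that case the poll timer
         * below is the only way corrected platform errors get harvested.
         */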
        init_timer(&cpe_poll_timer);
        cpe_poll_timer.function = ia64_mca_cpe_poll;

        {
                struct irq_desc *desc;
                unsigned int irq;

                if (cpe_vector >= 0) {
                        /* If platform supports CPEI, enable the irq. */
                        irq = local_vector_to_irq(cpe_vector);
                        if (irq > 0) {
                                cpe_poll_enabled = 0;
                                desc = irq_desc + irq;
                                desc->status |= IRQ_PER_CPU;
                                setup_irq(irq, &mca_cpe_irqaction);
                                ia64_cpe_irq = irq;
                                ia64_mca_register_cpev(cpe_vector);
                                IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
                                               __func__);
                                return 0;
                        }
                        printk(KERN_ERR "%s: Failed to find irq for CPE "
                               "interrupt handler, vector %d\n",
                               __func__, cpe_vector);
                }
                /* If platform doesn't support CPEI, get the timer going. */
                if (cpe_poll_enabled) {
                        ia64_mca_cpe_poll(0UL);
                        IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
                }
        }
#endif

        return 0;
}

device_initcall(ia64_mca_late_init);