mca.c

/*
 * File:	mca.c
 * Purpose:	Generic MCA handling layer
 *
 * Updated for latest kernel
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Copyright (C) 2002 Dell Inc.
 * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
 *
 * Copyright (C) 2002 Intel
 * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
 *
 * Copyright (C) 2001 Intel
 * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
 *
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
 *
 * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
 *
 * 03/04/15 D. Mosberger	Added INIT backtrace support.
 * 02/03/25 M. Domsch	GUID cleanups
 *
 * 02/01/04 J. Hall	Aligned MCA stack to 16 bytes, added platform vs. CPU
 *			error flag, set SAL default return values, changed
 *			error record structure to linked list, added init call
 *			to sal_get_state_info_size().
 *
 * 01/01/03 F. Lewis	Added setup of CMCI and CPEI IRQs, logging of corrected
 *			platform errors, completed code for logging of
 *			corrected & uncorrected machine check errors, and
 *			updated for conformance with Nov. 2000 revision of the
 *			SAL 3.0 spec.
 * 00/03/29 C. Fleckenstein	Fixed PAL/SAL update issues, began MCA bug fixes,
 *				logging issues, added min save state dump,
 *				added INIT handler.
 *
 * 2003-12-08 Keith Owens <kaos@sgi.com>
 *		smp_call_function() must not be called from interrupt context
 *		(can deadlock on tasklist_lock).
 *		Use keventd to call smp_call_function().
 *
 * 2004-02-01 Keith Owens <kaos@sgi.com>
 *		Avoid deadlock when using printk() for MCA and INIT records.
 *		Delete all record printing code, moved to salinfo_decode in user space.
 *		Mark variables and functions static where possible.
 *		Delete dead variables and functions.
 *		Reorder to remove the need for forward declarations and to
 *		consolidate related code.
 *
 * 2005-08-12 Keith Owens <kaos@sgi.com>
 *		Convert MCA/INIT handlers to use per event stacks and SAL/OS state.
 *
 * 2005-10-07 Keith Owens <kaos@sgi.com>
 *		Add notify_die() hooks.
 */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>

#include <asm/delay.h>
#include <asm/kdebug.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

#include "mca_drv.h"
#include "entry.h"

#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif
/* Used by mca_asm.S */
u32 ia64_mca_serialize;
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */

unsigned long __per_cpu_mca[NR_CPUS];

/* In mca_asm.S */
extern void			ia64_os_init_dispatch_monarch (void);
extern void			ia64_os_init_dispatch_slave (void);

static int monarch_cpu = -1;

static ia64_mc_info_t		ia64_mc_info;

#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
#define CPE_HISTORY_LENGTH    5
#define CMC_HISTORY_LENGTH    5

static struct timer_list cpe_poll_timer;
static struct timer_list cmc_poll_timer;
/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play w/ timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;

/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;

extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

static int mca_init __initdata;

static inline void
ia64_mca_spin(const char *func)
{
	printk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
	while (1)
		cpu_relax();
}
/*
 * IA64_MCA log support
 */
#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES	4	/* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
	spinlock_t	isl_lock;
	int		isl_index;
	unsigned long	isl_count;
	ia64_err_rec_t	*isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];

#define IA64_LOG_ALLOCATE(it, size) \
	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size); \
	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)    (1 - ia64_state_log[it].isl_index)
#define IA64_LOG_INDEX_INC(it) \
	{ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
	ia64_state_log[it].isl_count++;}
#define IA64_LOG_INDEX_DEC(it) \
	ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
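
/* Note on the double-buffering above: isl_index always names the "next"
 * buffer.  A record is read from SAL into NEXT, then IA64_LOG_INDEX_INC
 * flips isl_index (0 <-> 1), so the record just read becomes CURR
 * (1 - isl_index) and the other buffer becomes NEXT.  A nested MCA can
 * therefore be captured without overwriting a record still being consumed.
 */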
/*
 * ia64_log_init
 *	Reset the OS ia64 log buffer
 * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs	:	None
 */
static void __init
ia64_log_init(int sal_info_type)
{
	u64	max_size = 0;

	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
	IA64_LOG_LOCK_INIT(sal_info_type);

	// SAL will tell us the maximum size of any error record of this type
	max_size = ia64_sal_get_state_info_size(sal_info_type);
	if (!max_size)
		/* alloc_bootmem() doesn't like zero-sized allocations! */
		return;

	// set up OS data structures to hold error info
	IA64_LOG_ALLOCATE(sal_info_type, max_size);
	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}

/*
 * ia64_log_get
 *
 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *              irq_safe    whether you can use printk at this point
 *  Outputs :   size        (total record length)
 *              *buffer     (ptr to error record)
 *
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
	sal_log_record_header_t     *log_buffer;
	u64                         total_len = 0;
	int                         s;

	IA64_LOG_LOCK(sal_info_type);

	/* Get the process state information */
	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

	if (total_len) {
		IA64_LOG_INDEX_INC(sal_info_type);
		IA64_LOG_UNLOCK(sal_info_type);
		if (irq_safe) {
			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
				       "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
		}
		*buffer = (u8 *) log_buffer;
		return total_len;
	} else {
		IA64_LOG_UNLOCK(sal_info_type);
		return 0;
	}
}

/*
 *  ia64_mca_log_sal_error_record
 *
 *  This function retrieves a specified error record type from SAL
 *  and wakes up any processes waiting for error records.
 *
 *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE)
 *              FIXME: remove MCA and irq_safe.
 */
static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
	u8 *buffer;
	sal_log_record_header_t *rh;
	u64 size;
	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
#ifdef IA64_MCA_DEBUG_INFO
	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif

	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
	if (!size)
		return;

	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

	if (irq_safe)
		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
			smp_processor_id(),
			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

	/* Clear logs from corrected errors in case there's no user-level logger */
	rh = (sal_log_record_header_t *)buffer;
	if (rh->severity == sal_log_severity_corrected)
		ia64_sal_clear_state_info(sal_info_type);
}

/*
 * search_mca_table
 *	See if the MCA surfaced in an instruction range
 *	that has been tagged as recoverable.
 *
 *  Inputs
 *	first	First address range to check
 *	last	Last address range to check
 *	ip	Instruction pointer, address we are looking for
 *
 * Return value:
 *      1 on Success (in the table)/ 0 on Failure (not in the table)
 */
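/* Table entries hold self-relative offsets: start_addr and end_addr each
 * store the distance from the field's own address to the tagged
 * instruction, so the table is position independent.  The absolute range
 * is reconstructed below by adding the field's address back in.
 */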
int
search_mca_table (const struct mca_table_entry *first,
		  const struct mca_table_entry *last,
		  unsigned long ip)
{
	const struct mca_table_entry *curr;
	u64 curr_start, curr_end;

	curr = first;
	while (curr <= last) {
		curr_start = (u64) &curr->start_addr + curr->start_addr;
		curr_end = (u64) &curr->end_addr + curr->end_addr;

		if ((ip >= curr_start) && (ip <= curr_end)) {
			return 1;
		}
		curr++;
	}
	return 0;
}

/* Given an address, look for it in the mca tables. */
int mca_recover_range(unsigned long addr)
{
	extern struct mca_table_entry __start___mca_table[];
	extern struct mca_table_entry __stop___mca_table[];

	return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
}
EXPORT_SYMBOL_GPL(mca_recover_range);

#ifdef CONFIG_ACPI

int cpe_vector = -1;
int ia64_cpe_irq = -1;

static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cpe_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cpe_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
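
	/* Storm detection: count how many CPE interrupts landed within the
	 * last second.  If the history buffer fills inside that window
	 * (CPE_HISTORY_LENGTH hits in <= 1s), interrupt mode cannot keep up;
	 * mask the vector and fall back to timer-driven polling below.
	 */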
	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
	return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI

/*
 * ia64_mca_register_cpev
 *
 *  Register the corrected platform error vector with SAL.
 *
 *  Inputs
 *      cpev        Corrected Platform Error Vector number
 *
 *  Outputs
 *      None
 */
static void __init
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __FUNCTION__, cpev);
}
#endif /* CONFIG_ACPI */

/*
 * ia64_mca_cmc_vector_setup
 *
 *  Set up the corrected machine check vector register in the processor.
 *  (The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      None
 *
 * Outputs
 *	None
 */
void __cpuinit
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;        /* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x registered.\n",
		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *  Mask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x disabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *  Unmask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x enabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}
/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake up a particular cpu
 *	and mark that cpu to be out of rendez.
 *
 *  Inputs  :   cpuid
 *  Outputs :   None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
}

/*
 * ia64_mca_wakeup_all
 *
 *	Wake up all the cpus which have rendezvoused previously.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Clear the Rendez checkin flag for all cpus */
	for_each_online_cpu(cpu) {
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}

}

/*
 * ia64_mca_rendez_interrupt_handler
 *
 *	This is the handler used to put slave processors into a spin loop
 *	while the monarch processor does the mca handling, and later
 *	wake each slave up once the monarch is done.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
{
	unsigned long flags;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = NULL, .monarch_cpu = &monarch_cpu };

	/* Mask all interrupts */
	local_irq_save(flags);
	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	/* Wait for the monarch cpu to exit. */
	while (monarch_cpu != -1)
		cpu_relax();	/* spin until monarch leaves */

	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 *	Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
 *	arg		(Interrupt handler specific argument)
 *	ptregs		(Exception frame at the time of the interrupt)
 *	Outputs :	None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
{
	return IRQ_HANDLED;
}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void*,struct ia64_sal_os_state*)
	= NULL;

int
ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);
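
/* copy_reg moves one 64-bit register value from the saved state (*fr, with
 * its NAT bits collected in fnat) to its resting place (*tr, NAT bits in
 * *tnat).  A register's NAT bit position within its collection word is
 * given by bits 3..8 of the save address ((address >> 3) & 63), matching
 * the RSE's NAT collection layout.
 */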
static inline void
copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
{
	u64 fslot, tslot, nat;
	*tr = *fr;
	fslot = ((unsigned long)fr >> 3) & 63;
	tslot = ((unsigned long)tr >> 3) & 63;
	*tnat &= ~(1UL << tslot);
	nat = (fnat >> fslot) & 1;
	*tnat |= (nat << tslot);
}

/* Change the comm field on the MCA/INIT task to include the pid that
 * was interrupted, it makes for easier debugging.  If that pid was 0
 * (swapper or nested MCA/INIT) then use the start of the previous comm
 * field suffixed with its cpu.
 */
static void
ia64_mca_modify_comm(const struct task_struct *previous_current)
{
	char *p, comm[sizeof(current->comm)];
	if (previous_current->pid)
		snprintf(comm, sizeof(comm), "%s %d",
			current->comm, previous_current->pid);
	else {
		int l;
		if ((p = strchr(previous_current->comm, ' ')))
			l = p - previous_current->comm;
		else
			l = strlen(previous_current->comm);
		snprintf(comm, sizeof(comm), "%s %*s %d",
			current->comm, l, previous_current->comm,
			task_thread_info(previous_current)->cpu);
	}
	memcpy(current->comm, comm, sizeof(current->comm));
}

/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state, it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */
static struct task_struct *
ia64_mca_modify_original_stack(struct pt_regs *regs,
		const struct switch_stack *sw,
		struct ia64_sal_os_state *sos,
		const char *type)
{
	char *p;
	ia64_va va;
	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
	const pal_min_state_area_t *ms = sos->pal_min_state;
	struct task_struct *previous_current;
	struct pt_regs *old_regs;
	struct switch_stack *old_sw;
	unsigned size = sizeof(struct pt_regs) +
		sizeof(struct switch_stack) + 16;
	u64 *old_bspstore, *old_bsp;
	u64 *new_bspstore, *new_bsp;
	u64 old_unat, old_rnat, new_rnat, nat;
	u64 slots, loadrs = regs->loadrs;
	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
	u64 ar_bspstore = regs->ar_bspstore;
	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
	const u64 *bank;
	const char *msg;
	int cpu = smp_processor_id();

	previous_current = curr_task(cpu);
	set_curr_task(cpu, current);
	if ((p = strchr(current->comm, ' ')))
		*p = '\0';

	/* Best effort attempt to cope with MCA/INIT delivered while in
	 * physical mode.
	 */
	regs->cr_ipsr = ms->pmsa_ipsr;
	if (ia64_psr(regs)->dt == 0) {
		va.l = r12;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r12 = va.l;
		}
		va.l = r13;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r13 = va.l;
		}
	}
	if (ia64_psr(regs)->rt == 0) {
		va.l = ar_bspstore;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bspstore = va.l;
		}
		va.l = ar_bsp;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bsp = va.l;
		}
	}

	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
	 * have been copied to the old stack, the old stack may fail the
	 * validation tests below.  So ia64_old_stack() must restore the dirty
	 * registers from the new stack.  The old and new bspstore probably
	 * have different alignments, so loadrs calculated on the old bsp
	 * cannot be used to restore from the new bsp.  Calculate a suitable
	 * loadrs for the new stack and save it in the new pt_regs, where
	 * ia64_old_stack() can get it.
	 */
	old_bspstore = (u64 *)ar_bspstore;
	old_bsp = (u64 *)ar_bsp;
	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
	new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
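	/* loadrs holds the size of the dirty partition in bytes, stored in
	 * bits 16 and up (the ar.rsc.loadrs position), hence the "* 8 << 16":
	 * slots are 8 bytes each and the count is shifted into place.
	 */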

	/* Verify the previous stack state before we change it */
	if (user_mode(regs)) {
		msg = "occurred in user space";
		/* previous_current is guaranteed to be valid when the task was
		 * in user space, so ...
		 */
		ia64_mca_modify_comm(previous_current);
		goto no_mod;
	}

	if (!mca_recover_range(ms->pmsa_iip)) {
		if (r13 != sos->prev_IA64_KR_CURRENT) {
			msg = "inconsistent previous current and r13";
			goto no_mod;
		}
		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent r12 and r13";
			goto no_mod;
		}
		if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bspstore and r13";
			goto no_mod;
		}
		va.p = old_bspstore;
		if (va.f.reg < 5) {
			msg = "old_bspstore is in the wrong region";
			goto no_mod;
		}
		if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bsp and r13";
			goto no_mod;
		}
		size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
		if (ar_bspstore + size > r12) {
			msg = "no room for blocked state";
			goto no_mod;
		}
	}

	ia64_mca_modify_comm(previous_current);

	/* Make the original task look blocked.  First stack a struct pt_regs,
	 * describing the state at the time of interrupt.  mca_asm.S built a
	 * partial pt_regs, copy it and fill in the blanks using minstate.
	 */
	p = (char *)r12 - sizeof(*regs);
	old_regs = (struct pt_regs *)p;
	memcpy(old_regs, regs, sizeof(*regs));
	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
	 * pmsa_{xip,xpsr,xfs}
	 */
	if (ia64_psr(regs)->ic) {
		old_regs->cr_iip = ms->pmsa_iip;
		old_regs->cr_ipsr = ms->pmsa_ipsr;
		old_regs->cr_ifs = ms->pmsa_ifs;
	} else {
		old_regs->cr_iip = ms->pmsa_xip;
		old_regs->cr_ipsr = ms->pmsa_xpsr;
		old_regs->cr_ifs = ms->pmsa_xfs;
	}
	old_regs->pr = ms->pmsa_pr;
	old_regs->b0 = ms->pmsa_br0;
	old_regs->loadrs = loadrs;
	old_regs->ar_rsc = ms->pmsa_rsc;
	old_unat = old_regs->ar_unat;
	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
	if (ia64_psr(old_regs)->bn)
		bank = ms->pmsa_bank1_gr;
	else
		bank = ms->pmsa_bank0_gr;
	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);

	/* Next stack a struct switch_stack.  mca_asm.S built a partial
	 * switch_stack, copy it and fill in the blanks using pt_regs and
	 * minstate.
	 *
	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
	 * ar.pfs is set to 0.
	 *
	 * unwind.c::unw_unwind() does special processing for interrupt frames.
	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
	 * switch_stack on the original stack so it will unwind correctly when
	 * unwind.c reads pt_regs.
	 *
	 * thread.ksp is updated to point to the synthesized switch_stack.
	 */
	p -= sizeof(struct switch_stack);
	old_sw = (struct switch_stack *)p;
	memcpy(old_sw, sw, sizeof(*sw));
	old_sw->caller_unat = old_unat;
	old_sw->ar_fpsr = old_regs->ar_fpsr;
	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
	old_sw->b0 = (u64)ia64_leave_kernel;
	old_sw->b1 = ms->pmsa_br1;
	old_sw->ar_pfs = 0;
	old_sw->ar_unat = old_unat;
	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
	previous_current->thread.ksp = (u64)p - 16;

	/* Finally copy the original stack's registers back to its RBS.
	 * Registers from ar.bspstore through ar.bsp at the time of the event
	 * are in the current RBS, copy them back to the original stack.  The
	 * copy must be done register by register because the original bspstore
	 * and the current one have different alignments, so the saved RNAT
	 * data occurs at different places.
	 *
	 * mca_asm does cover, so the old_bsp already includes all registers at
	 * the time of MCA/INIT.  It also does flushrs, so all registers before
	 * this function have been written to backing store on the MCA/INIT
	 * stack.
	 */
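	/* Every 64th RBS slot (where bits 3..8 of the address are all ones)
	 * is an RNAT collection slot, not a register; the loop below detects
	 * these on both stacks, harvesting NAT bits from the new stack and
	 * depositing them at the old stack's collection points.
	 */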
	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
	old_rnat = regs->ar_rnat;
	while (slots--) {
		if (ia64_rse_is_rnat_slot(new_bspstore)) {
			new_rnat = ia64_get_rnat(new_bspstore++);
		}
		if (ia64_rse_is_rnat_slot(old_bspstore)) {
			*old_bspstore++ = old_rnat;
			old_rnat = 0;
		}
		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
		*old_bspstore++ = *new_bspstore++;
	}
	old_sw->ar_bspstore = (unsigned long)old_bspstore;
	old_sw->ar_rnat = old_rnat;

	sos->prev_task = previous_current;
	return previous_current;

no_mod:
	printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
			smp_processor_id(), type, msg);
	return previous_current;
}

/* The monarch/slave interaction is based on monarch_cpu and requires that all
 * slaves have entered rendezvous before the monarch leaves.  If any cpu has
 * not entered rendezvous yet then wait a bit.  The assumption is that any
 * slave that has not rendezvoused after a reasonable time is never going to do
 * so.  In this context, slave includes cpus that respond to the MCA rendezvous
 * interrupt, as well as cpus that receive the INIT slave event.
 */
static void
ia64_wait_for_slaves(int monarch, const char *type)
{
	int c, wait = 0, missing = 0;
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
			udelay(1000);		/* short wait first */
			wait = 1;
			break;
		}
	}
	if (!wait)
		goto all_in;
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
			udelay(5*1000000);	/* wait 5 seconds for slaves (arbitrary) */
			if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
				missing = 1;
			break;
		}
	}
	if (!missing)
		goto all_in;
	printk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
			printk(" %d", c);
	}
	printk("\n");
	return;

all_in:
	printk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
	return;
}
/*
 * ia64_mca_handler
 *
 *	This is the uncorrectable machine check handler, called from the
 *	OS_MCA dispatch code, which is in turn called from SAL_CHECK().
 *	This is the place where the core of OS MCA handling is done.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.  This handler code is supposed to be run only on the
 *	monarch processor.  Once the monarch is done with MCA handling
 *	further MCA logging is enabled by clearing logs.
 *	The monarch also has the duty of sending wakeup-IPIs to pull the
 *	slave processors out of the rendezvous spin loop.
 */
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 struct ia64_sal_os_state *sos)
{
	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
		&sos->proc_state_param;
	int recover, cpu = smp_processor_id();
	struct task_struct *previous_current;
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu };

	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
	console_loglevel = 15;	/* make sure printks make it to console */
	printk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d monarch=%ld\n",
		sos->proc_state_param, cpu, sos->monarch);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
	monarch_cpu = cpu;
	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	ia64_wait_for_slaves(cpu, "MCA");

	/* Wakeup all the processors which are spinning in the rendezvous loop.
	 * They will leave SAL, then spin in the OS with interrupts disabled
	 * until this monarch cpu leaves the MCA handler.  That gets control
	 * back to the OS so we can backtrace the other cpus, backtrace when
	 * spinning in SAL does not work.
	 */
	ia64_mca_wakeup_all();
	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* A TLB error exists only in this SAL error record, so recovery is
	 * possible here when the TLB check is the only check flagged (no
	 * cache, bus, register file or micro-architecture check). */
	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
		/* other error recovery */
		|| (ia64_mca_ucmc_extension
			&& ia64_mca_ucmc_extension(
				IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
				sos));

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
		sos->os_status = IA64_MCA_CORRECTED;
	}
	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	set_curr_task(cpu, previous_current);
	monarch_cpu = -1;
}
static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);

/*
 * ia64_mca_cmc_int_handler
 *
 *  This is the corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 *      saved registers ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
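
	/* Storm detection, as for CPEs above: CMC_HISTORY_LENGTH corrected
	 * machine checks within one second means the interrupt rate is too
	 * high; mask the vector everywhere (via keventd) and poll instead.
	 */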
	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
			/* If we're being hit with CMC interrupts, we won't
			 * ever execute the schedule_work() below.  Need to
			 * disable CMC interrupts on this processor now.
			 */
			ia64_mca_cmc_vector_disable(NULL);
			schedule_work(&cmc_disable_work);

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
	return IRQ_HANDLED;
}
/*
 *  ia64_mca_cmc_int_caller
 *
 * 	Triggered by sw interrupt from CMC polling routine.  Calls
 * 	real interrupt handler and either triggers a sw interrupt
 * 	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 * 	handled
 */
static irqreturn_t
ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

	ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
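
	/* Cascade: forward the soft interrupt to the next online cpu.  The
	 * last cpu in the chain compares the log count against the value
	 * recorded by the first cpu; if no new records appeared during the
	 * sweep, return to interrupt mode, otherwise re-arm the poll timer.
	 */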
	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/* If no log record, switch out of polling mode */
		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
			schedule_work(&cmc_enable_work);
			cmc_polling_enabled = 0;
		} else {
			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
		}

		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 *  ia64_mca_cmc_poll
 *
 *	Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs   :   dummy(unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cmc_poll (unsigned long dummy)
{
	/* Trigger a CMC interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 *  ia64_mca_cpe_int_caller
 *
 * 	Triggered by sw interrupt from CPE polling routine.  Calls
 * 	real interrupt handler and either triggers a sw interrupt
 * 	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 * 	handled
 */
#ifdef CONFIG_ACPI
static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
	static int poll_time = MIN_CPE_POLL_INTERVAL;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

	ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/*
		 * If a log was recorded, increase our polling frequency,
		 * otherwise, back off or return to interrupt mode.
		 */
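		/* Adaptive interval: halve poll_time (down to
		 * MIN_CPE_POLL_INTERVAL, 2 minutes) while records keep
		 * arriving; double it (up to MAX_CPE_POLL_INTERVAL, 15
		 * minutes) while the log stays quiet on CPEI-less systems.
		 */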
		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
		} else if (cpe_vector < 0) {
			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
		} else {
			poll_time = MIN_CPE_POLL_INTERVAL;

			printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
			enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
			cpe_poll_enabled = 0;
		}

		if (cpe_poll_enabled)
			mod_timer(&cpe_poll_timer, jiffies + poll_time);
		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 *  ia64_mca_cpe_poll
 *
 *	Poll for Corrected Platform Errors (CPEs), trigger interrupt
 *	on first cpu, from there it will trickle through all the cpus.
 *
 * Inputs   :   dummy(unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cpe_poll (unsigned long dummy)
{
	/* Trigger a CPE interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

#endif /* CONFIG_ACPI */

static int
default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
{
	int c;
	struct task_struct *g, *t;
	if (val != DIE_INIT_MONARCH_PROCESS)
		return NOTIFY_DONE;
	printk(KERN_ERR "Processes interrupted by INIT -");
	for_each_online_cpu(c) {
		struct ia64_sal_os_state *s;
		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
		s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
		g = s->prev_task;
		if (g) {
			if (g->pid)
				printk(" %d", g->pid);
			else
				printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
		}
	}
	printk("\n\n");
	if (read_trylock(&tasklist_lock)) {
		do_each_thread (g, t) {
			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
			show_stack(t, NULL);
		} while_each_thread (g, t);
		read_unlock(&tasklist_lock);
	}
	return NOTIFY_DONE;
}
/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_os_init_dispatch
 *
 * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state
 * for this event.  This code is used for both monarch and slave INIT events,
 * see sos->monarch.
 *
 * All INIT events switch to the INIT stack and change the previous process to
 * blocked status.  If one of the INIT events is the monarch then we are
 * probably processing the nmi button/command.  Use the monarch cpu to dump
 * all the processes.  The slave INIT events all spin until the monarch cpu
 * returns.  We can also get INIT slave events for MCA, in which case the MCA
 * process is the monarch.
 */
void
ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
		  struct ia64_sal_os_state *sos)
{
	static atomic_t slaves;
	static atomic_t monarchs;
	struct task_struct *previous_current;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu };

	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
	console_loglevel = 15;	/* make sure printks make it to console */

	(void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);

	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
		sos->proc_state_param, cpu, sos->monarch);
	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
	sos->os_status = IA64_INIT_RESUME;

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * slaves.  The last slave that enters is promoted to be a monarch.
	 * Remove this code in September 2006, that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
		printk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
		       __FUNCTION__, cpu);
		atomic_dec(&slaves);
		sos->monarch = 1;
	}

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * monarchs.  Second and subsequent monarchs are demoted to slaves.
	 * Remove this code in September 2006, that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
		printk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
		       __FUNCTION__, cpu);
		atomic_dec(&monarchs);
		sos->monarch = 0;
	}

	if (!sos->monarch) {
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
		while (monarch_cpu == -1)
			cpu_relax();	/* spin until monarch enters */
		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
		while (monarch_cpu != -1)
			cpu_relax();	/* spin until monarch leaves */
		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
		printk("Slave on cpu %d returning to normal service.\n", cpu);
		set_curr_task(cpu, previous_current);
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
		atomic_dec(&slaves);
		return;
	}

	monarch_cpu = cpu;
	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	/*
	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000),
	 * INIT can be generated via the BMC's command-line interface, but
	 * since the console is on the same serial line, the user will need
	 * some time to switch out of the BMC before the dump begins.
	 */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);
	ia64_wait_for_slaves(cpu, "INIT");
	/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
	 * to default_monarch_init_process() above and just print all the
	 * tasks.
	 */
	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	printk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
	atomic_dec(&monarchs);
	set_curr_task(cpu, previous_current);
	monarch_cpu = -1;
	return;
}
  1284. static int __init
  1285. ia64_mca_disable_cpe_polling(char *str)
  1286. {
  1287. cpe_poll_enabled = 0;
  1288. return 1;
  1289. }
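/* Booting with "disable_cpe_poll" on the kernel command line clears
 * cpe_poll_enabled before ia64_mca_late_init() runs, so the CPE poll timer
 * is never started on platforms without a CPEI vector.
 */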
__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);

static struct irqaction cmci_irqaction = {
	.handler =	ia64_mca_cmc_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"cmc_hndlr"
};

static struct irqaction cmcp_irqaction = {
	.handler =	ia64_mca_cmc_int_caller,
	.flags =	IRQF_DISABLED,
	.name =		"cmc_poll"
};

static struct irqaction mca_rdzv_irqaction = {
	.handler =	ia64_mca_rendez_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"mca_rdzv"
};

static struct irqaction mca_wkup_irqaction = {
	.handler =	ia64_mca_wakeup_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"mca_wkup"
};

#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
	.handler =	ia64_mca_cpe_int_handler,
	.flags =	IRQF_DISABLED,
	.name =		"cpe_hndlr"
};

static struct irqaction mca_cpep_irqaction = {
	.handler =	ia64_mca_cpe_int_caller,
	.flags =	IRQF_DISABLED,
	.name =		"cpe_poll"
};
#endif /* CONFIG_ACPI */

/* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
 * these stacks can never sleep, cannot return from the kernel to user space,
 * and do not appear in a normal ps listing, so there is no need to format
 * most of the fields.
 */
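/* Each per-cpu ia64_mca_cpu area set up below carries two such stacks, one
 * for MCA and one for INIT; format_mca_init_stack() gives each a minimal
 * task_struct and thread_info for the handler to run on.
 */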
static void __cpuinit
format_mca_init_stack(void *mca_data, unsigned long offset,
		const char *type, int cpu)
{
	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
	struct thread_info *ti;

	memset(p, 0, KERNEL_STACK_SIZE);
	ti = task_thread_info(p);
	ti->flags = _TIF_MCA_INIT;
	ti->preempt_count = 1;
	ti->task = p;
	ti->cpu = cpu;
	p->thread_info = ti;
	p->state = TASK_UNINTERRUPTIBLE;
	cpu_set(cpu, p->cpus_allowed);
	INIT_LIST_HEAD(&p->tasks);
	p->parent = p->real_parent = p->group_leader = p;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	strncpy(p->comm, type, sizeof(p->comm)-1);
}

/* Do per-CPU MCA-related initialization. */
void __cpuinit
ia64_mca_cpu_init(void *cpu_data)
{
	void *pal_vaddr;
	static int first_time = 1;

	if (first_time) {
		void *mca_data;
		int cpu;

		first_time = 0;
		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
					 * NR_CPUS + KERNEL_STACK_SIZE);
		mca_data = (void *)(((unsigned long)mca_data +
					KERNEL_STACK_SIZE - 1) &
				(-KERNEL_STACK_SIZE));
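		/* mca_data is now KERNEL_STACK_SIZE-aligned, so each
		 * carved-out ia64_mca_cpu region starts on a stack boundary,
		 * matching the alignment of a normal task's combined
		 * task_struct/stack area; the allocation above included
		 * KERNEL_STACK_SIZE of slack to allow for this rounding.
		 */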
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			format_mca_init_stack(mca_data,
					offsetof(struct ia64_mca_cpu, mca_stack),
					"MCA", cpu);
			format_mca_init_stack(mca_data,
					offsetof(struct ia64_mca_cpu, init_stack),
					"INIT", cpu);
			__per_cpu_mca[cpu] = __pa(mca_data);
			mca_data += sizeof(struct ia64_mca_cpu);
		}
	}

	/*
	 * The MCA info structure was allocated earlier and its
	 * physical address saved in __per_cpu_mca[cpu].  Copy that
	 * address to ia64_mca_data so we can access it as a per-CPU
	 * variable.
	 */
	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];

	/*
	 * Stash away a copy of the PTE needed to map the per-CPU page.
	 * We may need it during MCA recovery.
	 */
	__get_cpu_var(ia64_mca_per_cpu_pte) =
		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));

	/*
	 * Also, stash away a copy of the PAL address and the PTE
	 * needed to map it.
	 */
	pal_vaddr = efi_get_pal_addr();
	if (!pal_vaddr)
		return;
	__get_cpu_var(ia64_mca_pal_base) =
		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
							      PAGE_KERNEL));
}

/*
 * ia64_mca_init
 *
 * Do all the system-level MCA-specific initialization.
 *
 *	1. Register spinloop and wakeup request interrupt vectors
 *
 *	2. Register OS_MCA handler entry point
 *
 *	3. Register OS_INIT handler entry point
 *
 *	4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 *
 * Note that this initialization is done very early before some kernel
 * services are available.
 *
 * Inputs  : None
 *
 * Outputs : None
 */
void __init
ia64_mca_init(void)
{
	ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
	int i;
	s64 rc;
	struct ia64_sal_retval isrv;
	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
	static struct notifier_block default_init_monarch_nb = {
		.notifier_call = default_monarch_init_process,
		.priority = 0	/* we need to be notified last */
	};

	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);

	/* Clear the Rendez checkin flag for all cpus */
	for (i = 0; i < NR_CPUS; i++)
		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

	/*
	 * Register the rendezvous spinloop and wakeup mechanism with SAL
	 */

	/* Register the rendezvous interrupt vector with SAL */
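	/* SAL may refuse the requested rendezvous timeout with status -2 and
	 * return the value it will accept in isrv.v0; in that case retry the
	 * registration with the longer timeout.
	 */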
	while (1) {
		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
					SAL_MC_PARAM_MECHANISM_INT,
					IA64_MCA_RENDEZ_VECTOR,
					timeout,
					SAL_MC_PARAM_RZ_ALWAYS);
		rc = isrv.status;
		if (rc == 0)
			break;
		if (rc == -2) {
			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
				"%ld to %ld milliseconds\n", timeout, isrv.v0);
			timeout = isrv.v0;
			(void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
			continue;
		}
		printk(KERN_ERR "Failed to register rendezvous interrupt "
		       "with SAL (status %ld)\n", rc);
		return;
	}

	/* Register the wakeup interrupt vector with SAL */
	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
				SAL_MC_PARAM_MECHANISM_INT,
				IA64_MCA_WAKEUP_VECTOR,
				0, 0);
	rc = isrv.status;
	if (rc) {
		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);

	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
	 */
	ia64_mc_info.imi_mca_handler_size = 0;

	/* Register the os mca handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
				       ia64_mc_info.imi_mca_handler,
				       ia64_tpa(mca_hldlr_ptr->gp),
				       ia64_mc_info.imi_mca_handler_size,
				       0, 0, 0))) {
		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));

	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 * size of the actual init handler in mca_asm.S.
	 */
	ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp);
	ia64_mc_info.imi_monarch_init_handler_size = 0;
	ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
	ia64_mc_info.imi_slave_init_handler_size = 0;

	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
		       ia64_mc_info.imi_monarch_init_handler);

	/* Register the os init handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
				       ia64_mc_info.imi_monarch_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_monarch_init_handler_size,
				       ia64_mc_info.imi_slave_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_slave_init_handler_size))) {
		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
		       "(status %ld)\n", rc);
		return;
	}
	if (register_die_notifier(&default_init_monarch_nb)) {
		printk(KERN_ERR "Failed to register default monarch INIT process\n");
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);

	/*
	 * Configure the CMCI/P vector and handler.  Interrupts for CMC are
	 * per-processor, so AP CMC interrupts are set up in smp_callin() (smpboot.c).
	 */
	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP */

	/* Setup the MCA rendezvous interrupt vector */
	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);

	/* Setup the MCA wakeup interrupt vector */
	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P handler */
	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif

	/* Initialize the areas set aside by the OS to buffer the
	 * platform/processor error states for MCA/INIT/CMC
	 * handling.
	 */
	ia64_log_init(SAL_INFO_TYPE_MCA);
	ia64_log_init(SAL_INFO_TYPE_INIT);
	ia64_log_init(SAL_INFO_TYPE_CMC);
	ia64_log_init(SAL_INFO_TYPE_CPE);

	mca_init = 1;
	printk(KERN_INFO "MCA related initialization done\n");
}

/*
 * ia64_mca_late_init
 *
 * Opportunity to setup things that require initialization later
 * than ia64_mca_init.  Setup a timer to poll for CPEs if the
 * platform doesn't support an interrupt driven mechanism.
 *
 * Inputs  : None
 * Outputs : Status
 */
static int __init
ia64_mca_late_init(void)
{
	if (!mca_init)
		return 0;

	/* Setup the CMC poll timer */
	init_timer(&cmc_poll_timer);
	cmc_poll_timer.function = ia64_mca_cmc_poll;

	/* Unmask/enable the vector */
	cmc_polling_enabled = 0;
	schedule_work(&cmc_enable_work);

	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P vector and handler */
	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
	init_timer(&cpe_poll_timer);
	cpe_poll_timer.function = ia64_mca_cpe_poll;
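	/* If ACPI advertises a CPEI vector, wire the corrected platform
	 * error interrupt to its irq; otherwise fall back to the poll timer,
	 * unless polling was disabled on the command line.
	 */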
	{
		irq_desc_t *desc;
		unsigned int irq;

		if (cpe_vector >= 0) {
			/* If platform supports CPEI, enable the irq. */
			cpe_poll_enabled = 0;
			for (irq = 0; irq < NR_IRQS; ++irq)
				if (irq_to_vector(irq) == cpe_vector) {
					desc = irq_desc + irq;
					desc->status |= IRQ_PER_CPU;
					setup_irq(irq, &mca_cpe_irqaction);
					ia64_cpe_irq = irq;
				}
			ia64_mca_register_cpev(cpe_vector);
			IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
		} else {
			/* If platform doesn't support CPEI, get the timer going. */
			if (cpe_poll_enabled) {
				ia64_mca_cpe_poll(0UL);
				IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
			}
		}
	}
#endif

	return 0;
}

device_initcall(ia64_mca_late_init);