/*
 * File:	mca.c
 * Purpose:	Generic MCA handling layer
 *
 * Updated for latest kernel
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Copyright (C) 2002 Dell Inc.
 * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
 *
 * Copyright (C) 2002 Intel
 * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
 *
 * Copyright (C) 2001 Intel
 * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
 *
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
 *
 * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
 *
 * 03/04/15 D. Mosberger	Added INIT backtrace support.
 * 02/03/25 M. Domsch		GUID cleanups
 *
 * 02/01/04 J. Hall		Aligned MCA stack to 16 bytes, added platform vs. CPU
 *				error flag, set SAL default return values, changed
 *				error record structure to linked list, added init call
 *				to sal_get_state_info_size().
 *
 * 01/01/03 F. Lewis		Added setup of CMCI and CPEI IRQs, logging of corrected
 *				platform errors, completed code for logging of
 *				corrected & uncorrected machine check errors, and
 *				updated for conformance with Nov. 2000 revision of the
 *				SAL 3.0 spec.
 * 00/03/29 C. Fleckenstein	Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
 *				added min save state dump, added INIT handler.
 *
 * 2003-12-08 Keith Owens <kaos@sgi.com>
 *		smp_call_function() must not be called from interrupt context (can
 *		deadlock on tasklist_lock).  Use keventd to call smp_call_function().
 *
 * 2004-02-01 Keith Owens <kaos@sgi.com>
 *		Avoid deadlock when using printk() for MCA and INIT records.
 *		Delete all record printing code, moved to salinfo_decode in user space.
 *		Mark variables and functions static where possible.
 *		Delete dead variables and functions.
 *		Reorder to remove the need for forward declarations and to consolidate
 *		related code.
 *
 * 2005-08-12 Keith Owens <kaos@sgi.com>
 *		Convert MCA/INIT handlers to use per event stacks and SAL/OS state.
 *
 * 2005-10-07 Keith Owens <kaos@sgi.com>
 *		Add notify_die() hooks.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>

#include <asm/delay.h>
#include <asm/kdebug.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

#include "mca_drv.h"
#include "entry.h"

#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif

/* Used by mca_asm.S */
u32 ia64_mca_serialize;
DEFINE_PER_CPU(u64, ia64_mca_data);		/* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte);	/* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);		/* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);		/* vaddr PAL code granule */

unsigned long __per_cpu_mca[NR_CPUS];

/* In mca_asm.S */
extern void ia64_os_init_dispatch_monarch (void);
extern void ia64_os_init_dispatch_slave (void);

static int monarch_cpu = -1;

static ia64_mc_info_t ia64_mc_info;

#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
#define CPE_HISTORY_LENGTH    5
#define CMC_HISTORY_LENGTH    5

static struct timer_list cpe_poll_timer;
static struct timer_list cmc_poll_timer;

/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play w/ timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;

/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;

extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

static int mca_init __initdata;

static inline void
ia64_mca_spin(const char *func)
{
	printk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
	while (1)
		cpu_relax();
}
/*
 * IA64_MCA log support
 */
#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES	4	/* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
	spinlock_t	isl_lock;
	int		isl_index;
	unsigned long	isl_count;
	ia64_err_rec_t	*isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];

#define IA64_LOG_ALLOCATE(it, size) \
	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size); \
	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size);}
#define IA64_LOG_LOCK_INIT(it)	spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it)	spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)	spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
#define IA64_LOG_NEXT_INDEX(it)	ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)	1 - ia64_state_log[it].isl_index
#define IA64_LOG_INDEX_INC(it) \
	{ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
	ia64_state_log[it].isl_count++;}
#define IA64_LOG_INDEX_DEC(it) \
	ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it)	(void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it)	(void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)		ia64_state_log[it].isl_count
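
/*
 * Illustrative sketch (not part of the original source): how the
 * double-buffered indices above behave.  isl_index always names the
 * "next" buffer and 1 - isl_index the "current" one, so flipping the
 * index with IA64_LOG_INDEX_INC() promotes a freshly filled buffer to
 * "current" while the other becomes scratch space for a nested event.
 * Assuming isl_index starts at 0:
 *
 *	NEXT = isl_log[0], CURR = isl_log[1]
 *	...SAL record copied into NEXT...
 *	IA64_LOG_INDEX_INC(it);		// isl_index = 1, isl_count++
 *	NEXT = isl_log[1], CURR = isl_log[0]	// roles swapped
 */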
/*
 * ia64_log_init
 *	Reset the OS ia64 log buffer
 * Inputs	:	info_type	(SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs	:	None
 */
static void __init
ia64_log_init(int sal_info_type)
{
	u64	max_size = 0;

	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
	IA64_LOG_LOCK_INIT(sal_info_type);

	// SAL will tell us the maximum size of any error record of this type
	max_size = ia64_sal_get_state_info_size(sal_info_type);
	if (!max_size)
		/* alloc_bootmem() doesn't like zero-sized allocations! */
		return;

	// set up OS data structures to hold error info
	IA64_LOG_ALLOCATE(sal_info_type, max_size);
	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}

/*
 * ia64_log_get
 *
 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 * Inputs	:	info_type	(SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *			irq_safe	whether you can use printk at this point
 * Outputs	:	size		(total record length)
 *			*buffer		(ptr to error record)
 *
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
	sal_log_record_header_t	*log_buffer;
	u64			total_len = 0;
	int			s;

	IA64_LOG_LOCK(sal_info_type);

	/* Get the process state information */
	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

	if (total_len) {
		IA64_LOG_INDEX_INC(sal_info_type);
		IA64_LOG_UNLOCK(sal_info_type);
		if (irq_safe) {
			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
				       "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
		}
		*buffer = (u8 *) log_buffer;
		return total_len;
	} else {
		IA64_LOG_UNLOCK(sal_info_type);
		return 0;
	}
}

/*
 * ia64_mca_log_sal_error_record
 *
 *	This function retrieves a specified error record type from SAL
 *	and wakes up any processes waiting for error records.
 *
 * Inputs	:	sal_info_type	(Type of error record MCA/CMC/CPE)
 *			FIXME: remove MCA and irq_safe.
 */
static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
	u8 *buffer;
	sal_log_record_header_t *rh;
	u64 size;
	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
#ifdef IA64_MCA_DEBUG_INFO
	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif

	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
	if (!size)
		return;

	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

	if (irq_safe)
		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
			smp_processor_id(),
			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

	/* Clear logs from corrected errors in case there's no user-level logger */
	rh = (sal_log_record_header_t *)buffer;
	if (rh->severity == sal_log_severity_corrected)
		ia64_sal_clear_state_info(sal_info_type);
}

/*
 * search_mca_table
 *	See if the MCA surfaced in an instruction range
 *	that has been tagged as recoverable.
 *
 * Inputs
 *	first	First address range to check
 *	last	Last address range to check
 *	ip	Instruction pointer, address we are looking for
 *
 * Return value:
 *	1 on Success (in the table) / 0 on Failure (not in the table)
 */
int
search_mca_table (const struct mca_table_entry *first,
		  const struct mca_table_entry *last,
		  unsigned long ip)
{
	const struct mca_table_entry *curr;
	u64 curr_start, curr_end;

	curr = first;
	while (curr <= last) {
		curr_start = (u64) &curr->start_addr + curr->start_addr;
		curr_end = (u64) &curr->end_addr + curr->end_addr;

		if ((ip >= curr_start) && (ip <= curr_end)) {
			return 1;
		}
		curr++;
	}
	return 0;
}

/* Given an address, look for it in the mca tables. */
int mca_recover_range(unsigned long addr)
{
	extern struct mca_table_entry __start___mca_table[];
	extern struct mca_table_entry __stop___mca_table[];

	return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
}
EXPORT_SYMBOL_GPL(mca_recover_range);
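
/*
 * Illustrative note (not part of the original source): mca_table
 * entries store self-relative offsets rather than absolute addresses,
 * so each bound is recovered by adding the field's own address to the
 * offset stored in it.  Assuming a hypothetical entry at
 * 0xa000000000100000 whose start_addr field holds 0x5000:
 *
 *	curr_start = (u64) &curr->start_addr + curr->start_addr
 *		   = 0xa000000000100000 + 0x5000
 *		   = 0xa000000000105000	// first recoverable ip
 */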
#ifdef CONFIG_ACPI

int cpe_vector = -1;
int ia64_cpe_irq = -1;

static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cpe_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cpe_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
	return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */

#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *	Register the corrected platform error vector with SAL.
 *
 * Inputs
 *	cpev	Corrected Platform Error Vector number
 *
 * Outputs
 *	None
 */
static void __init
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __FUNCTION__, cpev);
}
#endif /* CONFIG_ACPI */

/*
 * ia64_mca_cmc_vector_setup
 *
 *	Setup the corrected machine check vector register in the processor.
 *	(The interrupt is masked on boot; ia64_mca_late_init unmasks it.)
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	None
 *
 * Outputs
 *	None
 */
void __cpuinit
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;	/* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x registered.\n",
		       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *	Mask the corrected machine check vector register in the processor.
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x disabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *	Unmask the corrected machine check vector register in the processor.
 *	This function is invoked on a per-processor basis.
 *
 * Inputs
 *	dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected "
		       "machine check vector %#x enabled.\n",
		       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(void *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
}
/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake up a particular cpu
 *	and mark that cpu to be out of rendez.
 *
 * Inputs	:	cpuid
 * Outputs	:	None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
}

/*
 * ia64_mca_wakeup_all
 *
 *	Wakeup all the cpus which have rendez'ed previously.
 *
 * Inputs	:	None
 * Outputs	:	None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Clear the Rendez checkin flag for all cpus */
	for_each_online_cpu(cpu) {
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}
}

/*
 * ia64_mca_rendez_interrupt_handler
 *
 *	This is the handler used to put slave processors into a spinloop
 *	while the monarch processor does the mca handling, and later
 *	wake each slave up once the monarch is done.
 *
 * Inputs	:	None
 * Outputs	:	None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	/* Mask all interrupts */
	local_irq_save(flags);
	if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, 0, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, 0, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	/* Wait for the monarch cpu to exit. */
	while (monarch_cpu != -1)
		cpu_relax();	/* spin until monarch leaves */

	if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, 0, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 * Inputs	:	wakeup_irq	(Wakeup-interrupt bit)
 *			arg		(Interrupt handler specific argument)
 *			ptregs		(Exception frame at the time of the interrupt)
 * Outputs	:	None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
{
	return IRQ_HANDLED;
}
/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void*,struct ia64_sal_os_state*)
	= NULL;

int
ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);

static inline void
copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)
{
	u64 fslot, tslot, nat;
	*tr = *fr;
	fslot = ((unsigned long)fr >> 3) & 63;
	tslot = ((unsigned long)tr >> 3) & 63;
	*tnat &= ~(1UL << tslot);
	nat = (fnat >> fslot) & 1;
	*tnat |= (nat << tslot);
}
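
/*
 * Illustrative example (not part of the original source): copy_reg()
 * moves one register value plus its NaT bit.  A saved register's NaT
 * bit lives at the position given by bits 3..8 of the register's
 * address, i.e. its 64-bit slot within a 512-byte span.  Assuming a
 * hypothetical source at 0x...e010 (fslot 2, with fnat bit 2 set) and
 * a destination at 0x...f038 (tslot 7):
 *
 *	*tr = *fr;			// value copied as-is
 *	nat = (fnat >> 2) & 1;		// source NaT bit = 1
 *	*tnat = (*tnat & ~(1UL << 7))	// clear the old dest bit
 *		| (nat << 7);		// record NaT in dest slot 7
 */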
/* Change the comm field on the MCA/INIT task to include the pid that
 * was interrupted, it makes for easier debugging.  If that pid was 0
 * (swapper or nested MCA/INIT) then use the start of the previous comm
 * field suffixed with its cpu.
 */
static void
ia64_mca_modify_comm(const task_t *previous_current)
{
	char *p, comm[sizeof(current->comm)];
	if (previous_current->pid)
		snprintf(comm, sizeof(comm), "%s %d",
			current->comm, previous_current->pid);
	else {
		int l;
		if ((p = strchr(previous_current->comm, ' ')))
			l = p - previous_current->comm;
		else
			l = strlen(previous_current->comm);
		snprintf(comm, sizeof(comm), "%s %*s %d",
			current->comm, l, previous_current->comm,
			task_thread_info(previous_current)->cpu);
	}
	memcpy(current->comm, comm, sizeof(current->comm));
}
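
/*
 * Illustrative examples (not part of the original source) of the comm
 * strings the snprintf formats above produce, assuming the pseudo task
 * is named "MCA":
 *
 *	interrupted pid 1234		->  "MCA 1234"
 *	interrupted swapper on cpu 3	->  "MCA swapper 3"
 */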
/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state, it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */
static task_t *
ia64_mca_modify_original_stack(struct pt_regs *regs,
		const struct switch_stack *sw,
		struct ia64_sal_os_state *sos,
		const char *type)
{
	char *p;
	ia64_va va;
	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
	const pal_min_state_area_t *ms = sos->pal_min_state;
	task_t *previous_current;
	struct pt_regs *old_regs;
	struct switch_stack *old_sw;
	unsigned size = sizeof(struct pt_regs) +
		sizeof(struct switch_stack) + 16;
	u64 *old_bspstore, *old_bsp;
	u64 *new_bspstore, *new_bsp;
	u64 old_unat, old_rnat, new_rnat, nat;
	u64 slots, loadrs = regs->loadrs;
	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
	u64 ar_bspstore = regs->ar_bspstore;
	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
	const u64 *bank;
	const char *msg;
	int cpu = smp_processor_id();

	previous_current = curr_task(cpu);
	set_curr_task(cpu, current);
	if ((p = strchr(current->comm, ' ')))
		*p = '\0';

	/* Best effort attempt to cope with MCA/INIT delivered while in
	 * physical mode.
	 */
	regs->cr_ipsr = ms->pmsa_ipsr;
	if (ia64_psr(regs)->dt == 0) {
		va.l = r12;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r12 = va.l;
		}
		va.l = r13;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r13 = va.l;
		}
	}
	if (ia64_psr(regs)->rt == 0) {
		va.l = ar_bspstore;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bspstore = va.l;
		}
		va.l = ar_bsp;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bsp = va.l;
		}
	}

	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
	 * have been copied to the old stack, the old stack may fail the
	 * validation tests below.  So ia64_old_stack() must restore the dirty
	 * registers from the new stack.  The old and new bspstore probably
	 * have different alignments, so loadrs calculated on the old bsp
	 * cannot be used to restore from the new bsp.  Calculate a suitable
	 * loadrs for the new stack and save it in the new pt_regs, where
	 * ia64_old_stack() can get it.
	 */
	old_bspstore = (u64 *)ar_bspstore;
	old_bsp = (u64 *)ar_bsp;
	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
	new_bspstore = (u64 *)((u64)current + IA64_RBS_OFFSET);
	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;

	/* Verify the previous stack state before we change it */
	if (user_mode(regs)) {
		msg = "occurred in user space";
		/* previous_current is guaranteed to be valid when the task was
		 * in user space, so ...
		 */
		ia64_mca_modify_comm(previous_current);
		goto no_mod;
	}
	if (!mca_recover_range(ms->pmsa_iip)) {
		if (r13 != sos->prev_IA64_KR_CURRENT) {
			msg = "inconsistent previous current and r13";
			goto no_mod;
		}
		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent r12 and r13";
			goto no_mod;
		}
		if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bspstore and r13";
			goto no_mod;
		}
		va.p = old_bspstore;
		if (va.f.reg < 5) {
			msg = "old_bspstore is in the wrong region";
			goto no_mod;
		}
		if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bsp and r13";
			goto no_mod;
		}
		size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
		if (ar_bspstore + size > r12) {
			msg = "no room for blocked state";
			goto no_mod;
		}
	}

	ia64_mca_modify_comm(previous_current);

	/* Make the original task look blocked.  First stack a struct pt_regs,
	 * describing the state at the time of interrupt.  mca_asm.S built a
	 * partial pt_regs, copy it and fill in the blanks using minstate.
	 */
	p = (char *)r12 - sizeof(*regs);
	old_regs = (struct pt_regs *)p;
	memcpy(old_regs, regs, sizeof(*regs));
	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
	 * pmsa_{xip,xpsr,xfs}
	 */
	if (ia64_psr(regs)->ic) {
		old_regs->cr_iip = ms->pmsa_iip;
		old_regs->cr_ipsr = ms->pmsa_ipsr;
		old_regs->cr_ifs = ms->pmsa_ifs;
	} else {
		old_regs->cr_iip = ms->pmsa_xip;
		old_regs->cr_ipsr = ms->pmsa_xpsr;
		old_regs->cr_ifs = ms->pmsa_xfs;
	}
	old_regs->pr = ms->pmsa_pr;
	old_regs->b0 = ms->pmsa_br0;
	old_regs->loadrs = loadrs;
	old_regs->ar_rsc = ms->pmsa_rsc;
	old_unat = old_regs->ar_unat;
	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
	if (ia64_psr(old_regs)->bn)
		bank = ms->pmsa_bank1_gr;
	else
		bank = ms->pmsa_bank0_gr;
	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);

	/* Next stack a struct switch_stack.  mca_asm.S built a partial
	 * switch_stack, copy it and fill in the blanks using pt_regs and
	 * minstate.
	 *
	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
	 * ar.pfs is set to 0.
	 *
	 * unwind.c::unw_unwind() does special processing for interrupt frames.
	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
	 * switch_stack on the original stack so it will unwind correctly when
	 * unwind.c reads pt_regs.
	 *
	 * thread.ksp is updated to point to the synthesized switch_stack.
	 */
	p -= sizeof(struct switch_stack);
	old_sw = (struct switch_stack *)p;
	memcpy(old_sw, sw, sizeof(*sw));
	old_sw->caller_unat = old_unat;
	old_sw->ar_fpsr = old_regs->ar_fpsr;
	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
	old_sw->b0 = (u64)ia64_leave_kernel;
	old_sw->b1 = ms->pmsa_br1;
	old_sw->ar_pfs = 0;
	old_sw->ar_unat = old_unat;
	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
	previous_current->thread.ksp = (u64)p - 16;

	/* Finally copy the original stack's registers back to its RBS.
	 * Registers from ar.bspstore through ar.bsp at the time of the event
	 * are in the current RBS, copy them back to the original stack.  The
	 * copy must be done register by register because the original bspstore
	 * and the current one have different alignments, so the saved RNAT
	 * data occurs at different places.
	 *
	 * mca_asm does cover, so the old_bsp already includes all registers at
	 * the time of MCA/INIT.  It also does flushrs, so all registers before
	 * this function have been written to backing store on the MCA/INIT
	 * stack.
	 */
	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
	old_rnat = regs->ar_rnat;
	while (slots--) {
		if (ia64_rse_is_rnat_slot(new_bspstore)) {
			new_rnat = ia64_get_rnat(new_bspstore++);
		}
		if (ia64_rse_is_rnat_slot(old_bspstore)) {
			*old_bspstore++ = old_rnat;
			old_rnat = 0;
		}
		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
		*old_bspstore++ = *new_bspstore++;
	}
	old_sw->ar_bspstore = (unsigned long)old_bspstore;
	old_sw->ar_rnat = old_rnat;

	sos->prev_task = previous_current;
	return previous_current;

no_mod:
	printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
			smp_processor_id(), type, msg);
	return previous_current;
}
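
/*
 * Illustrative note (not part of the original source): on the RBS,
 * every 64th slot (an address whose bits 3..8 are all 1) holds RNAT
 * collection bits rather than a register value.  Two backing stores
 * whose bases differ in alignment therefore reach their RNAT slots
 * after different numbers of registers, which is why the loop above
 * re-packs the NaT bits slot by slot instead of copying the RNAT
 * words wholesale.
 */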
/* The monarch/slave interaction is based on monarch_cpu and requires that all
 * slaves have entered rendezvous before the monarch leaves.  If any cpu has
 * not entered rendezvous yet then wait a bit.  The assumption is that any
 * slave that has not rendezvoused after a reasonable time is never going to do
 * so.  In this context, slave includes cpus that respond to the MCA rendezvous
 * interrupt, as well as cpus that receive the INIT slave event.
 */
static void
ia64_wait_for_slaves(int monarch)
{
	int c, wait = 0, missing = 0;
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
			udelay(1000);		/* short wait first */
			wait = 1;
			break;
		}
	}
	if (!wait)
		goto all_in;
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
			udelay(5*1000000);	/* wait 5 seconds for slaves (arbitrary) */
			if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
				missing = 1;
			break;
		}
	}
	if (!missing)
		goto all_in;
	printk(KERN_INFO "OS MCA slave did not rendezvous on cpu");
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
			printk(" %d", c);
	}
	printk("\n");
	return;

all_in:
	printk(KERN_INFO "All OS MCA slaves have reached rendezvous\n");
	return;
}
/*
 * ia64_mca_handler
 *
 *	This is the uncorrectable machine check handler called from the OS_MCA
 *	dispatch code, which in turn is called from SAL_CHECK().
 *	This is the place where the core of OS MCA handling is done.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.  This handler code is supposed to be run only on the
 *	monarch processor.  Once the monarch is done with MCA handling
 *	further MCA logging is enabled by clearing logs.
 *	Monarch also has the duty of sending wakeup-IPIs to pull the
 *	slave processors out of the rendezvous spinloop.
 */
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 struct ia64_sal_os_state *sos)
{
	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
		&sos->proc_state_param;
	int recover, cpu = smp_processor_id();
	task_t *previous_current;

	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
	console_loglevel = 15;	/* make sure printks make it to console */
	printk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d monarch=%ld\n",
		sos->proc_state_param, cpu, sos->monarch);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
	monarch_cpu = cpu;
	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, 0, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	ia64_wait_for_slaves(cpu);

	/* Wakeup all the processors which are spinning in the rendezvous loop.
	 * They will leave SAL, then spin in the OS with interrupts disabled
	 * until this monarch cpu leaves the MCA handler.  That gets control
	 * back to the OS so we can backtrace the other cpus, backtrace when
	 * spinning in SAL does not work.
	 */
	ia64_mca_wakeup_all();
	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, 0, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* Recoverable if a TLB error is the only error in this SAL record */
	recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
		/* other error recovery */
		|| (ia64_mca_ucmc_extension
			&& ia64_mca_ucmc_extension(
				IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
				sos));

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
		sos->os_status = IA64_MCA_CORRECTED;
	}
	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, 0, 0, recover)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	set_curr_task(cpu, previous_current);
	monarch_cpu = -1;
}
static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);

/*
 * ia64_mca_cmc_int_handler
 *
 *	This is the corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __FUNCTION__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);

	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
			/* If we're being hit with CMC interrupts, we won't
			 * ever execute the schedule_work() below.  Need to
			 * disable CMC interrupts on this processor now.
			 */
			ia64_mca_cmc_vector_disable(NULL);
			schedule_work(&cmc_disable_work);

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			return IRQ_HANDLED;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
	return IRQ_HANDLED;
}
/*
 * ia64_mca_cmc_int_caller
 *
 *	Triggered by sw interrupt from CMC polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 *	handled
 */
static irqreturn_t
ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

	ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/* If no log record, switch out of polling mode */
		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
			schedule_work(&cmc_enable_work);
			cmc_polling_enabled = 0;

		} else {

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
		}

		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 * ia64_mca_cmc_poll
 *
 *	Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs	:	dummy(unused)
 * Outputs	:	None
 *
 */
static void
ia64_mca_cmc_poll (unsigned long dummy)
{
	/* Trigger a CMC interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}
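
/*
 * Illustrative sketch (not part of the original source) of one polling
 * cycle driven by the cascade above, on a hypothetical 2-cpu box:
 *
 *	cmc_poll_timer fires
 *	  -> IPI IA64_CMCP_VECTOR to cpu 0
 *	       ia64_mca_cmc_int_caller: latch start_count, handle CMCs,
 *	       forward the IPI to cpu 1
 *	  -> cpu 1: handle CMCs; last cpu, so compare the log count:
 *	       unchanged -> schedule_work(&cmc_enable_work), stop polling
 *	       grew	 -> mod_timer(), poll again in CMC_POLL_INTERVAL
 */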
/*
 * ia64_mca_cpe_int_caller
 *
 *	Triggered by sw interrupt from CPE polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *	saved registers ptr
 * Outputs
 *	handled
 */
#ifdef CONFIG_ACPI

static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
{
	static int start_count = -1;
	static int poll_time = MIN_CPE_POLL_INTERVAL;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

	ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);

	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/*
		 * If a log was recorded, increase our polling frequency,
		 * otherwise, backoff or return to interrupt mode.
		 */
		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
		} else if (cpe_vector < 0) {
			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
		} else {
			poll_time = MIN_CPE_POLL_INTERVAL;

			printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
			enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
			cpe_poll_enabled = 0;
		}

		if (cpe_poll_enabled)
			mod_timer(&cpe_poll_timer, jiffies + poll_time);
		start_count = -1;
	}

	return IRQ_HANDLED;
}
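
/*
 * Illustrative numbers (not part of the original source) for the
 * adaptive poll interval above, starting from MIN_CPE_POLL_INTERVAL
 * (2 minutes) with no CPEI available (cpe_vector < 0):
 *
 *	quiet cycles:		2 -> 4 -> 8 -> 15 min
 *				(doubling, capped at MAX_CPE_POLL_INTERVAL)
 *	cycle that logged CPEs:	interval halved, floored at 2 min
 */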
/*
 * ia64_mca_cpe_poll
 *
 *	Poll for Corrected Platform Errors (CPEs), trigger interrupt
 *	on first cpu, from there it will trickle through all the cpus.
 *
 * Inputs	:	dummy(unused)
 * Outputs	:	None
 *
 */
static void
ia64_mca_cpe_poll (unsigned long dummy)
{
	/* Trigger a CPE interrupt cascade */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

#endif /* CONFIG_ACPI */
static int
default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
{
	int c;
	struct task_struct *g, *t;
	if (val != DIE_INIT_MONARCH_PROCESS)
		return NOTIFY_DONE;
	printk(KERN_ERR "Processes interrupted by INIT -");
	for_each_online_cpu(c) {
		struct ia64_sal_os_state *s;
		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
		s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
		g = s->prev_task;
		if (g) {
			if (g->pid)
				printk(" %d", g->pid);
			else
				printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
		}
	}
	printk("\n\n");
	if (read_trylock(&tasklist_lock)) {
		do_each_thread (g, t) {
			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
			show_stack(t, NULL);
		} while_each_thread (g, t);
		read_unlock(&tasklist_lock);
	}
	return NOTIFY_DONE;
}
/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_os_init_dispatch
 *
 * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state for
 * this event.  This code is used for both monarch and slave INIT events, see
 * sos->monarch.
 *
 * All INIT events switch to the INIT stack and change the previous process to
 * blocked status.  If one of the INIT events is the monarch then we are
 * probably processing the nmi button/command.  Use the monarch cpu to dump all
 * the processes.  The slave INIT events all spin until the monarch cpu
 * returns.  We can also get INIT slave events for MCA, in which case the MCA
 * process is the monarch.
 */
void
ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
		  struct ia64_sal_os_state *sos)
{
	static atomic_t slaves;
	static atomic_t monarchs;
	task_t *previous_current;
	int cpu = smp_processor_id();

	oops_in_progress = 1;	/* FIXME: make printk NMI/MCA/INIT safe */
	console_loglevel = 15;	/* make sure printks make it to console */

	printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
		sos->proc_state_param, cpu, sos->monarch);
	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
	sos->os_status = IA64_INIT_RESUME;

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * slaves.  The last slave that enters is promoted to be a monarch.
	 * Remove this code in September 2006, that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
		printk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
		       __FUNCTION__, cpu);
		atomic_dec(&slaves);
		sos->monarch = 1;
	}

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * monarchs.  Second and subsequent monarchs are demoted to slaves.
	 * Remove this code in September 2006, that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
		printk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
		       __FUNCTION__, cpu);
		atomic_dec(&monarchs);
		sos->monarch = 0;
	}

	if (!sos->monarch) {
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
		while (monarch_cpu == -1)
			cpu_relax();	/* spin until monarch enters */
		if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, 0, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
		if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, 0, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
		while (monarch_cpu != -1)
			cpu_relax();	/* spin until monarch leaves */
		if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, 0, 0, 0)
				== NOTIFY_STOP)
			ia64_mca_spin(__FUNCTION__);
		printk("Slave on cpu %d returning to normal service.\n", cpu);
		set_curr_task(cpu, previous_current);
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
		atomic_dec(&slaves);
		return;
	}

	monarch_cpu = cpu;
	if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, 0, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	/*
	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
	 * generated via the BMC's command-line interface, but since the console is on the
	 * same serial line, the user will need some time to switch out of the BMC before
	 * the dump begins.
	 */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);
	ia64_wait_for_slaves(cpu);

	/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
	 * to default_monarch_init_process() above and just print all the
	 * tasks.
	 */
	if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, 0, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);
	if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, 0, 0, 0)
			== NOTIFY_STOP)
		ia64_mca_spin(__FUNCTION__);

	printk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
	atomic_dec(&monarchs);
	set_curr_task(cpu, previous_current);
	monarch_cpu = -1;
	return;
}
static int __init
ia64_mca_disable_cpe_polling(char *str)
{
	cpe_poll_enabled = 0;
	return 1;
}

__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);

static struct irqaction cmci_irqaction = {
	.handler =	ia64_mca_cmc_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"cmc_hndlr"
};

static struct irqaction cmcp_irqaction = {
	.handler =	ia64_mca_cmc_int_caller,
	.flags =	SA_INTERRUPT,
	.name =		"cmc_poll"
};

static struct irqaction mca_rdzv_irqaction = {
	.handler =	ia64_mca_rendez_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"mca_rdzv"
};

static struct irqaction mca_wkup_irqaction = {
	.handler =	ia64_mca_wakeup_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"mca_wkup"
};

#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
	.handler =	ia64_mca_cpe_int_handler,
	.flags =	SA_INTERRUPT,
	.name =		"cpe_hndlr"
};

static struct irqaction mca_cpep_irqaction = {
	.handler =	ia64_mca_cpe_int_caller,
	.flags =	SA_INTERRUPT,
	.name =		"cpe_poll"
};
#endif /* CONFIG_ACPI */

/* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
 * these stacks can never sleep, they cannot return from the kernel to user
 * space, they do not appear in a normal ps listing.  So there is no need to
 * format most of the fields.
 */

static void __cpuinit
format_mca_init_stack(void *mca_data, unsigned long offset,
		const char *type, int cpu)
{
	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
	struct thread_info *ti;

	memset(p, 0, KERNEL_STACK_SIZE);
	ti = task_thread_info(p);
	ti->flags = _TIF_MCA_INIT;
	ti->preempt_count = 1;
	ti->task = p;
	ti->cpu = cpu;
	p->thread_info = ti;
	p->state = TASK_UNINTERRUPTIBLE;
	cpu_set(cpu, p->cpus_allowed);
	INIT_LIST_HEAD(&p->tasks);
	p->parent = p->real_parent = p->group_leader = p;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	strncpy(p->comm, type, sizeof(p->comm)-1);
}

/* Do per-CPU MCA-related initialization. */
void __cpuinit
ia64_mca_cpu_init(void *cpu_data)
{
	void *pal_vaddr;
	static int first_time = 1;

	if (first_time) {
		void *mca_data;
		int cpu;

		first_time = 0;
		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
					 * NR_CPUS + KERNEL_STACK_SIZE);
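		/* Round the allocation up to a KERNEL_STACK_SIZE boundary:
		 * the embedded MCA/INIT stacks must be aligned like normal
		 * kernel stacks, and the extra KERNEL_STACK_SIZE requested
		 * above leaves room for this rounding.
		 */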
		mca_data = (void *)(((unsigned long)mca_data +
					KERNEL_STACK_SIZE - 1) &
				(-KERNEL_STACK_SIZE));
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			format_mca_init_stack(mca_data,
					offsetof(struct ia64_mca_cpu, mca_stack),
					"MCA", cpu);
			format_mca_init_stack(mca_data,
					offsetof(struct ia64_mca_cpu, init_stack),
					"INIT", cpu);
			__per_cpu_mca[cpu] = __pa(mca_data);
			mca_data += sizeof(struct ia64_mca_cpu);
		}
	}

	/*
	 * The MCA info structure was allocated earlier and its
	 * physical address saved in __per_cpu_mca[cpu].  Copy that
	 * address to ia64_mca_data so we can access it as a per-CPU
	 * variable.
	 */
	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()];

	/*
	 * Stash away a copy of the PTE needed to map the per-CPU page.
	 * We may need it during MCA recovery.
	 */
	__get_cpu_var(ia64_mca_per_cpu_pte) =
		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));

	/*
	 * Also, stash away a copy of the PAL address and the PTE
	 * needed to map it.
	 */
	pal_vaddr = efi_get_pal_addr();
	if (!pal_vaddr)
		return;
	__get_cpu_var(ia64_mca_pal_base) =
		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
							      PAGE_KERNEL));
}

/*
 * ia64_mca_init
 *
 * Do all the system-level MCA-specific initialization.
 *
 *	1. Register spinloop and wakeup request interrupt vectors
 *
 *	2. Register OS_MCA handler entry point
 *
 *	3. Register OS_INIT handler entry point
 *
 *	4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 *
 * Note that this initialization is done very early, before some kernel
 * services are available.
 *
 * Inputs  : None
 *
 * Outputs : None
 */
void __init
ia64_mca_init(void)
{
	ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
	int i;
	s64 rc;
	struct ia64_sal_retval isrv;
	u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;	/* platform specific */
	static struct notifier_block default_init_monarch_nb = {
		.notifier_call = default_monarch_init_process,
		.priority = 0	/* we need to be notified last */
	};

	IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);

	/* Clear the Rendez checkin flag for all cpus */
	for (i = 0; i < NR_CPUS; i++)
		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

	/*
	 * Register the rendezvous spinloop and wakeup mechanism with SAL
	 */

	/* Register the rendezvous interrupt vector with SAL */
	while (1) {
		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
					      SAL_MC_PARAM_MECHANISM_INT,
					      IA64_MCA_RENDEZ_VECTOR,
					      timeout,
					      SAL_MC_PARAM_RZ_ALWAYS);
		rc = isrv.status;
		if (rc == 0)
			break;
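		/* SAL returns -2 when it wants a longer rendezvous timeout;
		 * the value it suggests comes back in isrv.v0, so retry the
		 * registration with that timeout.
		 */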
		if (rc == -2) {
			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
				"%ld to %ld milliseconds\n", timeout, isrv.v0);
			timeout = isrv.v0;
			continue;
		}
		printk(KERN_ERR "Failed to register rendezvous interrupt "
		       "with SAL (status %ld)\n", rc);
		return;
	}

	/* Register the wakeup interrupt vector with SAL */
	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
				      SAL_MC_PARAM_MECHANISM_INT,
				      IA64_MCA_WAKEUP_VECTOR,
				      0, 0);
	rc = isrv.status;
	if (rc) {
		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);

	ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp);
	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
	 */
	ia64_mc_info.imi_mca_handler_size = 0;

	/* Register the os mca handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
				       ia64_mc_info.imi_mca_handler,
				       ia64_tpa(mca_hldlr_ptr->gp),
				       ia64_mc_info.imi_mca_handler_size,
				       0, 0, 0)))
	{
		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));

	/*
	 * XXX - disable SAL checksum by setting size to 0; should be the
	 * size of the actual init handler in mca_asm.S.
	 */
	ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp);
	ia64_mc_info.imi_monarch_init_handler_size = 0;
	ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp);
	ia64_mc_info.imi_slave_init_handler_size = 0;

	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
		       ia64_mc_info.imi_monarch_init_handler);

	/* Register the os init handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
				       ia64_mc_info.imi_monarch_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_monarch_init_handler_size,
				       ia64_mc_info.imi_slave_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_slave_init_handler_size)))
	{
		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
		       "(status %ld)\n", rc);
		return;
	}
	if (register_die_notifier(&default_init_monarch_nb)) {
		printk(KERN_ERR "Failed to register default monarch INIT process\n");
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);

	/*
	 * Configure the CMCI/P vector and handler.  Interrupts for CMC are
	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
	 */
	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
	ia64_mca_cmc_vector_setup();	/* Setup vector on BSP */

	/* Setup the MCA rendezvous interrupt vector */
	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);

	/* Setup the MCA wakeup interrupt vector */
	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P handler */
	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
#endif

	/* Initialize the areas set aside by the OS to buffer the
	 * platform/processor error states for MCA/INIT/CMC
	 * handling.
	 */
	ia64_log_init(SAL_INFO_TYPE_MCA);
	ia64_log_init(SAL_INFO_TYPE_INIT);
	ia64_log_init(SAL_INFO_TYPE_CMC);
	ia64_log_init(SAL_INFO_TYPE_CPE);

	mca_init = 1;
	printk(KERN_INFO "MCA related initialization done\n");
}
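
/* Note: ia64_mca_init() is meant to run once during early boot (on ia64 it
 * is invoked from the arch setup path), before most kernel services exist;
 * anything that needs timers or workqueues is deferred to
 * ia64_mca_late_init() below.
 */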

/*
 * ia64_mca_late_init
 *
 * Opportunity to set up things that require initialization later
 * than ia64_mca_init.  Sets up a timer to poll for CPEs if the
 * platform doesn't support an interrupt-driven mechanism.
 *
 * Inputs  : None
 * Outputs : Status
 */
static int __init
ia64_mca_late_init(void)
{
	if (!mca_init)
		return 0;

	/* Setup the CMCI/P vector and handler */
	init_timer(&cmc_poll_timer);
	cmc_poll_timer.function = ia64_mca_cmc_poll;

	/* Unmask/enable the vector */
	cmc_polling_enabled = 0;
	schedule_work(&cmc_enable_work);

	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);

#ifdef CONFIG_ACPI
	/* Setup the CPEI/P vector and handler */
	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
	init_timer(&cpe_poll_timer);
	cpe_poll_timer.function = ia64_mca_cpe_poll;

	{
		irq_desc_t *desc;
		unsigned int irq;

		if (cpe_vector >= 0) {
			/* If platform supports CPEI, enable the irq. */
			cpe_poll_enabled = 0;
			for (irq = 0; irq < NR_IRQS; ++irq)
				if (irq_to_vector(irq) == cpe_vector) {
					desc = irq_descp(irq);
					desc->status |= IRQ_PER_CPU;
					setup_irq(irq, &mca_cpe_irqaction);
					ia64_cpe_irq = irq;
				}
			ia64_mca_register_cpev(cpe_vector);
			IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
		} else {
			/* If platform doesn't support CPEI, get the timer going. */
			if (cpe_poll_enabled) {
				ia64_mca_cpe_poll(0UL);
				IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
			}
		}
	}
#endif

	return 0;
}
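
/* ia64_mca_late_init() runs during the device initcall phase, once the
 * timer and workqueue infrastructure used above is available.
 */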
device_initcall(ia64_mca_late_init);