mca_drv.c

/*
 * File:	mca_drv.c
 * Purpose:	Generic MCA handling layer
 *
 * Copyright (C) 2004 FUJITSU LIMITED
 * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
 * Copyright (C) 2005 Silicon Graphics, Inc
 * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kallsyms.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <linux/mm.h>

#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

#include "mca_drv.h"

/* max size of SAL error record (default) */
static int sal_rec_max = 10000;

/* from mca_drv_asm.S */
extern void *mca_handler_bhhook(void);

static DEFINE_SPINLOCK(mca_bh_lock);
typedef enum {
        MCA_IS_LOCAL  = 0,
        MCA_IS_GLOBAL = 1
} mca_type_t;

#define MAX_PAGE_ISOLATE 1024

static struct page *page_isolate[MAX_PAGE_ISOLATE];
static int num_page_isolate = 0;

typedef enum {
        ISOLATE_NG = 0,
        ISOLATE_OK = 1
} isolate_status_t;

/*
 * This pool keeps pointers to the section part of SAL error record
 */
static struct {
        slidx_list_t *buffer; /* section pointer list pool */
        int          cur_idx; /* Current index of section pointer list pool */
        int          max_idx; /* Maximum index of section pointer list pool */
} slidx_pool;
/**
 * mca_page_isolate - isolate a poisoned page in order not to use it later
 * @paddr:	poisoned memory location
 *
 * Return value:
 *	ISOLATE_OK / ISOLATE_NG
 */
static isolate_status_t
mca_page_isolate(unsigned long paddr)
{
        int i;
        struct page *p;

        /* whether physical address is valid or not */
        if (!ia64_phys_addr_valid(paddr))
                return ISOLATE_NG;

        /* convert physical address to physical page number */
        p = pfn_to_page(paddr >> PAGE_SHIFT);

        /* check whether this page has already been registered */
        for (i = 0; i < num_page_isolate; i++)
                if (page_isolate[i] == p)
                        return ISOLATE_OK; /* already listed */

        /* limitation check */
        if (num_page_isolate == MAX_PAGE_ISOLATE)
                return ISOLATE_NG;

        /* reject pages with the 'SLAB' or 'Reserved' attribute */
        if (PageSlab(p) || PageReserved(p))
                return ISOLATE_NG;

        /* add attribute 'Reserved' and register the page */
        SetPageReserved(p);
        page_isolate[num_page_isolate++] = p;

        return ISOLATE_OK;
}
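/*
 * Worked example (illustrative sketch, not part of the original source):
 * with a hypothetical 16KB page size (PAGE_SHIFT = 14), a poisoned physical
 * address of 0x40008000 maps to
 *
 *	pfn = 0x40008000 >> 14 = 0x10002
 *
 * and pfn_to_page(0x10002) yields the struct page that gets marked
 * Reserved and remembered in page_isolate[].
 */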
/**
 * mca_handler_bh - Kill the process which encountered the memory read error
 * @paddr:	poisoned address received from MCA Handler
 */
void
mca_handler_bh(unsigned long paddr)
{
        printk(KERN_DEBUG "OS_MCA: process [pid: %d](%s) encounters MCA.\n",
                current->pid, current->comm);

        spin_lock(&mca_bh_lock);
        if (mca_page_isolate(paddr) == ISOLATE_OK) {
                printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr);
        } else {
                printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr);
        }
        spin_unlock(&mca_bh_lock);

        /* This process is about to kill itself */
        do_exit(SIGKILL);
}
/**
 * mca_make_peidx - Make index of processor error section
 * @slpi:	pointer to record of processor error section
 * @peidx:	pointer to index of processor error section
 */
static void
mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
{
        /*
         * calculate the start address of
         *   "struct cpuid_info" and "sal_processor_static_info_t".
         */
        u64 total_check_num = slpi->valid.num_cache_check
                                + slpi->valid.num_tlb_check
                                + slpi->valid.num_bus_check
                                + slpi->valid.num_reg_file_check
                                + slpi->valid.num_ms_check;
        u64 head_size = sizeof(sal_log_mod_error_info_t) * total_check_num
                        + sizeof(sal_log_processor_info_t);
        u64 mid_size  = slpi->valid.cpuid_info * sizeof(struct sal_cpuid_info);

        peidx_head(peidx)   = slpi;
        peidx_mid(peidx)    = (struct sal_cpuid_info *)
                (slpi->valid.cpuid_info ? ((char *)slpi + head_size) : NULL);
        peidx_bottom(peidx) = (sal_processor_static_info_t *)
                (slpi->valid.psi_static_struct ?
                        ((char *)slpi + head_size + mid_size) : NULL);
}
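/*
 * Layout sketch (illustrative, not part of the original source): the offset
 * arithmetic above implies a processor error section laid out roughly as
 *
 *	sal_log_processor_info_t    header        (start of section)
 *	sal_log_mod_error_info_t    checks[N]     N = total_check_num
 *	struct sal_cpuid_info       cpuid_info    present iff valid.cpuid_info
 *	sal_processor_static_info_t static_info   present iff valid.psi_static_struct
 *
 * so head_size skips the header plus all check entries, and mid_size
 * additionally skips cpuid_info when it is present.
 */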
/**
 * mca_make_slidx - Make index of SAL error record
 * @buffer:	pointer to SAL error record
 * @slidx:	pointer to index of SAL error record
 *
 * Return value:
 *	1 if record has platform error / 0 if not
 */
#define LOG_INDEX_ADD_SECT_PTR(sect, ptr) \
	{ slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \
	  hl->hdr = ptr; \
	  list_add(&hl->list, &(sect)); \
	  slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; }
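/*
 * Illustrative expansion (not part of the original source): an invocation
 * such as LOG_INDEX_ADD_SECT_PTR(slidx->proc_err, sp) takes the next free
 * slidx_list_t from the preallocated pool, points it at the section header
 * and links it onto the per-type list:
 *
 *	slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx];
 *	hl->hdr = sp;
 *	list_add(&hl->list, &(slidx->proc_err));
 *	slidx_pool.cur_idx = (slidx_pool.cur_idx + 1) % slidx_pool.max_idx;
 */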
static int
mca_make_slidx(void *buffer, slidx_table_t *slidx)
{
        int platform_err = 0;
        int record_len = ((sal_log_record_header_t *)buffer)->len;
        u32 ercd_pos;
        int sects;
        sal_log_section_hdr_t *sp;

        /*
         * Initialize index referring current record
         */
        INIT_LIST_HEAD(&(slidx->proc_err));
        INIT_LIST_HEAD(&(slidx->mem_dev_err));
        INIT_LIST_HEAD(&(slidx->sel_dev_err));
        INIT_LIST_HEAD(&(slidx->pci_bus_err));
        INIT_LIST_HEAD(&(slidx->smbios_dev_err));
        INIT_LIST_HEAD(&(slidx->pci_comp_err));
        INIT_LIST_HEAD(&(slidx->plat_specific_err));
        INIT_LIST_HEAD(&(slidx->host_ctlr_err));
        INIT_LIST_HEAD(&(slidx->plat_bus_err));
        INIT_LIST_HEAD(&(slidx->unsupported));

        /*
         * Extract a Record Header
         */
        slidx->header = buffer;

        /*
         * Extract each section record
         * (arranged from "int ia64_log_platform_info_print()")
         */
        for (ercd_pos = sizeof(sal_log_record_header_t), sects = 0;
                ercd_pos < record_len; ercd_pos += sp->len, sects++) {
                sp = (sal_log_section_hdr_t *)((char *)buffer + ercd_pos);
                if (!efi_guidcmp(sp->guid, SAL_PROC_DEV_ERR_SECT_GUID)) {
                        LOG_INDEX_ADD_SECT_PTR(slidx->proc_err, sp);
                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) {
                        platform_err = 1;
                        LOG_INDEX_ADD_SECT_PTR(slidx->mem_dev_err, sp);
                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) {
                        platform_err = 1;
                        LOG_INDEX_ADD_SECT_PTR(slidx->sel_dev_err, sp);
                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) {
                        platform_err = 1;
                        LOG_INDEX_ADD_SECT_PTR(slidx->pci_bus_err, sp);
                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) {
                        platform_err = 1;
                        LOG_INDEX_ADD_SECT_PTR(slidx->smbios_dev_err, sp);
                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) {
                        platform_err = 1;
                        LOG_INDEX_ADD_SECT_PTR(slidx->pci_comp_err, sp);
                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) {
                        platform_err = 1;
                        LOG_INDEX_ADD_SECT_PTR(slidx->plat_specific_err, sp);
                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) {
                        platform_err = 1;
                        LOG_INDEX_ADD_SECT_PTR(slidx->host_ctlr_err, sp);
                } else if (!efi_guidcmp(sp->guid, SAL_PLAT_BUS_ERR_SECT_GUID)) {
                        platform_err = 1;
                        LOG_INDEX_ADD_SECT_PTR(slidx->plat_bus_err, sp);
                } else {
                        LOG_INDEX_ADD_SECT_PTR(slidx->unsupported, sp);
                }
        }
        slidx->n_sections = sects;

        return platform_err;
}
/**
 * init_record_index_pools - Initialize pool of lists for SAL record index
 *
 * Return value:
 *	0 on Success / -ENOMEM on Failure
 */
static int
init_record_index_pools(void)
{
        int i;
        int rec_max_size;  /* Maximum size of SAL error records */
        int sect_min_size; /* Minimum size of SAL error sections */
        /* minimum size table of each section */
        static int sal_log_sect_min_sizes[] = {
                sizeof(sal_log_processor_info_t) + sizeof(sal_processor_static_info_t),
                sizeof(sal_log_mem_dev_err_info_t),
                sizeof(sal_log_sel_dev_err_info_t),
                sizeof(sal_log_pci_bus_err_info_t),
                sizeof(sal_log_smbios_dev_err_info_t),
                sizeof(sal_log_pci_comp_err_info_t),
                sizeof(sal_log_plat_specific_err_info_t),
                sizeof(sal_log_host_ctlr_err_info_t),
                sizeof(sal_log_plat_bus_err_info_t),
        };

        /*
         * The MCA handler cannot allocate new memory on the fly,
         * so we preallocate enough memory to handle a SAL record.
         *
         * Initialize a handling set of slidx_pool:
         *   1. Pick up the max size of SAL error records
         *   2. Pick up the min size of SAL error sections
         *   3. Allocate a pool large enough for 2 SAL records
         *      (now we can estimate the maximum number of sections in a record.)
         */

        /* - 1 - */
        rec_max_size = sal_rec_max;

        /* - 2 - */
        sect_min_size = sal_log_sect_min_sizes[0];
        for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
                if (sect_min_size > sal_log_sect_min_sizes[i])
                        sect_min_size = sal_log_sect_min_sizes[i];

        /* - 3 - */
        slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
        slidx_pool.buffer = (slidx_list_t *)
                kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);

        return slidx_pool.buffer ? 0 : -ENOMEM;
}
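/*
 * Sizing sketch (illustrative, not part of the original source): with the
 * default sal_rec_max of 10000 bytes and a hypothetical smallest section
 * struct of 40 bytes, the pool would hold
 *
 *	max_idx = (10000 / 40) * 2 + 1 = 501
 *
 * slidx_list_t entries, i.e. enough list nodes to index two worst-case
 * records without allocating anything in the MCA path.
 */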
/*****************************************************************************
 * Recovery functions                                                        *
 *****************************************************************************/

/**
 * is_mca_global - Check whether this MCA is global or not
 * @peidx:	pointer of index of processor error section
 * @pbci:	pointer to pal_bus_check_info_t
 *
 * Return value:
 *	MCA_IS_LOCAL / MCA_IS_GLOBAL
 */
static mca_type_t
is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
                struct ia64_sal_os_state *sos)
{
        pal_processor_state_info_t *psp =
                (pal_processor_state_info_t *)peidx_psp(peidx);
        /*
         * PAL can request a rendezvous if the MCA has a global scope.
         * If the "rz_always" flag is set, SAL requests an MCA rendezvous
         * regardless of whether the MCA is global or local.
         * Therefore it is a local MCA when a rendezvous has not been requested.
         * If the rendezvous failed, the system must go down.
         */
        switch (sos->rv_rc) {
        case -1: /* SAL rendezvous unsuccessful */
                return MCA_IS_GLOBAL;
        case  0: /* SAL rendezvous not required */
                return MCA_IS_LOCAL;
        case  1: /* SAL rendezvous successful int */
        case  2: /* SAL rendezvous successful int with init */
        default:
                break;
        }
        /*
         * If one or more Cache/TLB/Reg_File/Uarch_Check is present,
         * it would be a local MCA. (i.e. processor internal error)
         */
        if (psp->tc || psp->cc || psp->rc || psp->uc)
                return MCA_IS_LOCAL;

        /*
         * A Bus_Check structure with the Bus_Check.ib (internal bus error) flag set
         * would be a global MCA. (e.g. a system bus address parity error)
         */
        if (!pbci || pbci->ib)
                return MCA_IS_GLOBAL;

        /*
         * A Bus_Check structure with the Bus_Check.eb (external bus error) flag set
         * could be either a local MCA or a global MCA.
         *
         * Referring to Bus_Check.bsi:
         *   0: Unknown/unclassified
         *   1: BERR#
         *   2: BINIT#
         *   3: Hard Fail
         * (FIXME: Are these SGI specific or generic bsi values?)
         */
        if (pbci->eb)
                switch (pbci->bsi) {
                case 0:
                        /* e.g. a load from poisoned memory */
                        return MCA_IS_LOCAL;
                case 1:
                case 2:
                case 3:
                        return MCA_IS_GLOBAL;
                }

        return MCA_IS_GLOBAL;
}
/**
 * recover_from_read_error - Try to recover from errors whose type is "read".
 * @slidx:	pointer of index of SAL error record
 * @peidx:	pointer of index of processor error section
 * @pbci:	pointer of pal_bus_check_info
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */
static int
recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx,
                        pal_bus_check_info_t *pbci,
                        struct ia64_sal_os_state *sos)
{
        sal_log_mod_error_info_t *smei;
        pal_min_state_area_t *pmsa;
        struct ia64_psr *psr1, *psr2;
        ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t *)mca_handler_bhhook;

        /* Is the target address valid? */
        if (!pbci->tv)
                return 0;

        /*
         * cpu read or memory-mapped io read
         *
         *    offending process  affected process  OS MCA do
         *     kernel mode        kernel mode       down system
         *     kernel mode        user   mode       kill the process
         *     user   mode        kernel mode       down system (*)
         *     user   mode        user   mode       kill the process
         *
         * (*) You could terminate the offending user-mode process
         *     if (pbci->pv && pbci->pl != 0) *and* if you are sure
         *     the process does not hold any kernel locks.
         */
        psr1 = (struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);

        /*
         * Check the privilege level of the interrupted context.
         * If it is user-mode, then terminate the affected process.
         */
        if (psr1->cpl != 0) {
                smei = peidx_bus_check(peidx, 0);
                if (smei->valid.target_identifier) {
                        /*
                         * setup for resume to bottom half of MCA,
                         * "mca_handler_bhhook"
                         */
                        pmsa = sos->pal_min_state;
                        /* pass to bhhook as 1st argument (gr8) */
                        pmsa->pmsa_gr[8-1] = smei->target_identifier;
                        /* set interrupted return address (but no use) */
                        pmsa->pmsa_br0 = pmsa->pmsa_iip;
                        /* change resume address to bottom half */
                        pmsa->pmsa_iip = mca_hdlr_bh->fp;
                        pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
                        /* set cpl with kernel mode */
                        psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
                        psr2->cpl = 0;
                        psr2->ri  = 0;
                        psr2->i   = 0;

                        return 1;
                }
        }

        return 0;
}
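/*
 * Resume-path sketch (illustrative, not part of the original source): the
 * register setup above arranges that when the PAL min-state is resumed,
 * control lands in the assembly stub mca_handler_bhhook with
 *
 *	gr1  = gp from the stub's function descriptor
 *	gr8  = smei->target_identifier (the poisoned physical address)
 *	iip  = entry point of mca_handler_bhhook
 *	ipsr = kernel mode (cpl = 0), slot 0, interrupts off
 *
 * The stub is then expected to call mca_handler_bh(paddr) above, which
 * isolates the page and kills the affected user process via do_exit().
 */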
/**
 * recover_from_platform_error - Recover from platform error.
 * @slidx:	pointer of index of SAL error record
 * @peidx:	pointer of index of processor error section
 * @pbci:	pointer of pal_bus_check_info
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */
static int
recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx,
                        pal_bus_check_info_t *pbci,
                        struct ia64_sal_os_state *sos)
{
        int status = 0;
        pal_processor_state_info_t *psp =
                (pal_processor_state_info_t *)peidx_psp(peidx);

        if (psp->bc && pbci->eb && pbci->bsi == 0) {
                switch (pbci->type) {
                case 1: /* partial read */
                case 3: /* full line(cpu) read */
                case 9: /* I/O space read */
                        status = recover_from_read_error(slidx, peidx, pbci, sos);
                        break;
                case 0: /* unknown */
                case 2: /* partial write */
                case 4: /* full line write */
                case 5: /* implicit or explicit write-back operation */
                case 6: /* snoop probe */
                case 7: /* incoming or outgoing ptc.g */
                case 8: /* write coalescing transactions */
                case 10: /* I/O space write */
                case 11: /* inter-processor interrupt message(IPI) */
                case 12: /* interrupt acknowledge or external task priority cycle */
                default:
                        break;
                }
        }

        return status;
}
/**
 * recover_from_processor_error
 * @platform:	whether a platform error section is present or not
 * @slidx:	pointer of index of SAL error record
 * @peidx:	pointer of index of processor error section
 * @pbci:	pointer of pal_bus_check_info
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */
/*
 * Later we try to recover when all of the conditions below are satisfied.
 *  1. Only one processor error section exists.
 *  2. BUS_CHECK is present and no other checks are (except TLB_CHECK).
 *  3. There is only one BUS_CHECK_INFO entry.
 *  4. The "external bus error" flag is set and the other flags are not.
 */
static int
recover_from_processor_error(int platform, slidx_table_t *slidx,
                        peidx_table_t *peidx, pal_bus_check_info_t *pbci,
                        struct ia64_sal_os_state *sos)
{
        pal_processor_state_info_t *psp =
                (pal_processor_state_info_t *)peidx_psp(peidx);

        /*
         * We cannot recover errors with checks other than bus_check.
         */
        if (psp->cc || psp->rc || psp->uc)
                return 0;

        /*
         * If there is no bus error, the record is weird but we need not recover.
         */
        if (psp->bc == 0 || pbci == NULL)
                return 1;

        /*
         * Sorry, we cannot handle so many.
         */
        if (peidx_bus_check_num(peidx) > 1)
                return 0;
        /*
         * Well, here is only one bus error.
         */
        if (pbci->ib || pbci->cc)
                return 0;
        if (pbci->eb && pbci->bsi > 0)
                return 0;
        if (psp->ci == 0)
                return 0;

        /*
         * This is a local MCA and estimated as a recoverable external bus error.
         * (e.g. a load from poisoned memory)
         * This means "there are some platform errors".
         */
        if (platform)
                return recover_from_platform_error(slidx, peidx, pbci, sos);
        /*
         * On account of a strange SAL error record, we cannot recover.
         */
        return 0;
}
/**
 * mca_try_to_recover - Try to recover from MCA
 * @rec:	pointer to a SAL error record
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */
static int
mca_try_to_recover(void *rec,
        struct ia64_sal_os_state *sos)
{
        int platform_err;
        int n_proc_err;
        slidx_table_t slidx;
        peidx_table_t peidx;
        pal_bus_check_info_t pbci;

        /* Make index of SAL error record */
        platform_err = mca_make_slidx(rec, &slidx);

        /* Count processor error sections */
        n_proc_err = slidx_count(&slidx, proc_err);

        /* Now, the OS can recover only when there is one processor error section */
        if (n_proc_err > 1)
                return 0;
        else if (n_proc_err == 0) {
                /* Weird SAL record ... We need not recover */
                return 1;
        }

        /* Make index of processor error section */
        mca_make_peidx((sal_log_processor_info_t *)
                slidx_first_entry(&slidx.proc_err)->hdr, &peidx);

        /* Extract Processor BUS_CHECK[0] */
        *((u64 *)&pbci) = peidx_check_info(&peidx, bus_check, 0);

        /* Check whether MCA is global or not */
        if (is_mca_global(&peidx, &pbci, sos))
                return 0;

        /* Try to recover a processor error */
        return recover_from_processor_error(platform_err, &slidx, &peidx,
                                            &pbci, sos);
}
/*
 * =============================================================================
 */

int __init mca_external_handler_init(void)
{
        if (init_record_index_pools())
                return -ENOMEM;

        /* register external mca handlers */
        if (ia64_reg_MCA_extension(mca_try_to_recover)) {
                printk(KERN_ERR "ia64_reg_MCA_extension failed.\n");
                kfree(slidx_pool.buffer);
                return -EFAULT;
        }
        return 0;
}

void __exit mca_external_handler_exit(void)
{
        /* unregister external mca handlers */
        ia64_unreg_MCA_extension();
        kfree(slidx_pool.buffer);
}

module_init(mca_external_handler_init);
module_exit(mca_external_handler_exit);
module_param(sal_rec_max, int, 0644);
MODULE_PARM_DESC(sal_rec_max, "Max size of SAL error record");

MODULE_DESCRIPTION("ia64 platform dependent mca handler driver");
MODULE_LICENSE("GPL");
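/*
 * Usage sketch (illustrative, not part of the original source): when built
 * as a module, the sal_rec_max parameter can be overridden at load time,
 * for example
 *
 *	modprobe <module-name> sal_rec_max=32768
 *
 * (the installed module name depends on the build and is assumed here).
 * The parameter is also exported read/write under
 * /sys/module/<module-name>/parameters/sal_rec_max because of mode 0644,
 * but the slidx pool is sized only once in init_record_index_pools(), so
 * only the load-time value affects the allocation.
 */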