/*
 * ras.c
 * Copyright (C) 2001 Dave Engebretsen IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Change Activity:
 * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support.
 * End Change Activity
 */
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/sysrq.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/ppcdebug.h>
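
/*
 * Scratch buffer for RTAS error log data pulled in by the RAS interrupt
 * handlers below; concurrent use is serialized by ras_log_buf_lock.
 */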
static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);
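
/*
 * Buffer for machine check error logs copied out of the FWNMI save area.
 * It has no locking of its own; see the comment above fwnmi_get_errinfo().
 */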
char mce_data_buf[RTAS_ERROR_LOG_MAX];

/* This is true if we are using the firmware NMI handler (typically LPAR) */
extern int fwnmi_active;

static int ras_get_sensor_state_token;
static int ras_check_exception_token;

#define EPOW_SENSOR_TOKEN	9
#define EPOW_SENSOR_INDEX	0
#define RAS_VECTOR_OFFSET	0x500
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id,
                                      struct pt_regs *regs);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id,
                                       struct pt_regs *regs);

/* #define DEBUG */
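
/*
 * Request all of the interrupts listed in the given property of a
 * device node: map each firmware interrupt number to a virtual irq
 * and wire it to the supplied handler under the supplied name.
 */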
static void request_ras_irqs(struct device_node *np, char *propname,
                             irqreturn_t (*handler)(int, void *, struct pt_regs *),
                             const char *name)
{
        unsigned int *ireg, len, i;
        int virq, n_intr;

        ireg = (unsigned int *)get_property(np, propname, &len);
        if (ireg == NULL)
                return;
        n_intr = prom_n_intr_cells(np);
        len /= n_intr * sizeof(*ireg);

        for (i = 0; i < len; i++) {
                virq = virt_irq_create_mapping(*ireg);
                if (virq == NO_IRQ) {
                        printk(KERN_ERR "Unable to allocate interrupt "
                               "number for %s\n", np->full_name);
                        return;
                }
                if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) {
                        printk(KERN_ERR "Unable to request interrupt %d for "
                               "%s\n", irq_offset_up(virq), np->full_name);
                        return;
                }
                ireg += n_intr;
        }
}

/*
 * Initialize handlers for the set of interrupts caused by hardware errors
 * and power system events.
 */
static int __init init_ras_IRQ(void)
{
        struct device_node *np;

        ras_get_sensor_state_token = rtas_token("get-sensor-state");
        ras_check_exception_token = rtas_token("check-exception");

        /* Internal Errors */
        np = of_find_node_by_path("/event-sources/internal-errors");
        if (np != NULL) {
                request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt,
                                 "RAS_ERROR");
                request_ras_irqs(np, "interrupts", ras_error_interrupt,
                                 "RAS_ERROR");
                of_node_put(np);
        }

        /* EPOW Events */
        np = of_find_node_by_path("/event-sources/epow-events");
        if (np != NULL) {
                request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt,
                                 "RAS_EPOW");
                request_ras_irqs(np, "interrupts", ras_epow_interrupt,
                                 "RAS_EPOW");
                of_node_put(np);
        }

        return 1;
}
__initcall(init_ras_IRQ);

/*
 * Handle power subsystem events (EPOW).
 *
 * Presently we just log that the event has occurred.  This should be fixed
 * to examine the type of power failure and take appropriate action where
 * the time horizon permits something useful to be done.
 */
static irqreturn_t
ras_epow_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        int status = 0xdeadbeef;
        int state = 0;
        int critical;

        status = rtas_call(ras_get_sensor_state_token, 2, 2, &state,
                           EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX);

        if (state > 3)
                critical = 1;	/* Time Critical */
        else
                critical = 0;

        spin_lock(&ras_log_buf_lock);

        status = rtas_call(ras_check_exception_token, 6, 1, NULL,
                           RAS_VECTOR_OFFSET,
                           virt_irq_to_real(irq_offset_down(irq)),
                           RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
                           critical, __pa(&ras_log_buf),
                           rtas_get_error_log_max());

        udbg_printf("EPOW <0x%lx 0x%x 0x%x>\n",
                    *((unsigned long *)&ras_log_buf), status, state);
        printk(KERN_WARNING "EPOW <0x%lx 0x%x 0x%x>\n",
               *((unsigned long *)&ras_log_buf), status, state);

        /* format and print the extended information */
        log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);

        spin_unlock(&ras_log_buf_lock);
        return IRQ_HANDLED;
}

/*
 * Handle hardware error interrupts.
 *
 * RTAS check-exception is called to collect data on the exception.  If
 * the error is deemed recoverable, we log a warning and return.
 * For nonrecoverable errors, an error is logged and we stop all processing
 * as quickly as possible in order to prevent propagation of the failure.
 */
static irqreturn_t
ras_error_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct rtas_error_log *rtas_elog;
        int status = 0xdeadbeef;
        int fatal;

        spin_lock(&ras_log_buf_lock);

        status = rtas_call(ras_check_exception_token, 6, 1, NULL,
                           RAS_VECTOR_OFFSET,
                           virt_irq_to_real(irq_offset_down(irq)),
                           RTAS_INTERNAL_ERROR, 1 /* Time Critical */,
                           __pa(&ras_log_buf),
                           rtas_get_error_log_max());

        rtas_elog = (struct rtas_error_log *)ras_log_buf;

        if ((status == 0) && (rtas_elog->severity >= RTAS_SEVERITY_ERROR_SYNC))
                fatal = 1;
        else
                fatal = 0;

        /* format and print the extended information */
        log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);

        if (fatal) {
                udbg_printf("Fatal HW Error <0x%lx 0x%x>\n",
                            *((unsigned long *)&ras_log_buf), status);
                printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n",
                       *((unsigned long *)&ras_log_buf), status);

#ifndef DEBUG
                /* Don't actually power off when debugging so we can test
                 * without actually failing while injecting errors.
                 * Error data will not be logged to syslog.
                 */
                ppc_md.power_off();
#endif
        } else {
                udbg_printf("Recoverable HW Error <0x%lx 0x%x>\n",
                            *((unsigned long *)&ras_log_buf), status);
                printk(KERN_WARNING
                       "Warning: Recoverable hardware error <0x%lx 0x%x>\n",
                       *((unsigned long *)&ras_log_buf), status);
        }

        spin_unlock(&ras_log_buf_lock);
        return IRQ_HANDLED;
}

/* Get the error information for errors coming through the
 * FWNMI vectors.  The pt_regs' r3 will be updated to reflect
 * the actual r3 if possible, and a ptr to the error log entry
 * will be returned if found.
 *
 * The mce_data_buf does not have any locks or protection around it;
 * if a second machine check comes in, or a system reset is done
 * before we have logged the error, then we will get corruption in the
 * error log.  This is preferable to holding off on calling
 * ibm,nmi-interlock, which would result in us checkstopping if a
 * second machine check did come in.
 */
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
        unsigned long errdata = regs->gpr[3];
        struct rtas_error_log *errhdr = NULL;
        unsigned long *savep;

        if ((errdata >= 0x7000 && errdata < 0x7fff0) ||
            (errdata >= rtas.base && errdata < rtas.base + rtas.size - 16)) {
                savep = __va(errdata);
                regs->gpr[3] = savep[0];	/* restore original r3 */
                memset(mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
                memcpy(mce_data_buf, (char *)(savep + 1), RTAS_ERROR_LOG_MAX);
                errhdr = (struct rtas_error_log *)mce_data_buf;
        } else {
                printk("FWNMI: corrupt r3\n");
        }
        return errhdr;
}

/* Call this when done with the data returned by fwnmi_get_errinfo.
 * It will release the saved data area for other CPUs in the
 * partition to receive FWNMI errors.
 */
static void fwnmi_release_errinfo(void)
{
        int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
        if (ret != 0)
                printk("FWNMI: nmi-interlock failed: %d\n", ret);
}
void pSeries_system_reset_exception(struct pt_regs *regs)
{
        if (fwnmi_active) {
                struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs);
                if (errhdr) {
                        /* XXX Should look at FWNMI information */
                }
                fwnmi_release_errinfo();
        }
}

/*
 * See if we can recover from a machine check exception.
 * This is only called on power4 (or above) and only via
 * the Firmware Non-Maskable Interrupts (fwnmi) handler
 * which provides the error analysis for us.
 *
 * Return 1 if corrected (or delivered a signal).
 * Return 0 if there is nothing we can do.
 */
static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
{
        int nonfatal = 0;

        if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
                /* Platform corrected itself */
                nonfatal = 1;
        } else if ((regs->msr & MSR_RI) &&
                   user_mode(regs) &&
                   err->severity == RTAS_SEVERITY_ERROR_SYNC &&
                   err->disposition == RTAS_DISP_NOT_RECOVERED &&
                   err->target == RTAS_TARGET_MEMORY &&
                   err->type == RTAS_TYPE_ECC_UNCORR &&
                   !(current->pid == 0 || current->pid == 1)) {
                /* Kill off a user process with an ECC error */
                printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n",
                       current->pid);
                /* XXX something better for ECC error? */
                _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
                nonfatal = 1;
        }

        log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);

        return nonfatal;
}

/*
 * Handle a machine check.
 *
 * Note that on Power 4 and beyond, Firmware Non-Maskable Interrupts (fwnmi)
 * should be present.  If so, the handler which called us tells us whether the
 * error was recovered (never true if RI=0).
 *
 * On hardware prior to Power 4 these exceptions were asynchronous, which
 * means we can't tell exactly where they occurred and so we can't recover.
 */
int pSeries_machine_check_exception(struct pt_regs *regs)
{
        struct rtas_error_log *errp;

        if (fwnmi_active) {
                errp = fwnmi_get_errinfo(regs);
                fwnmi_release_errinfo();
                if (errp && recover_mce(regs, errp))
                        return 1;
        }

        return 0;
}