nmi.c
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 * Copyright (C) 2011 Don Zickus Red Hat, Inc.
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#define NMI_MAX_NAMELEN	16

struct nmiaction {
	struct list_head list;
	nmi_handler_t handler;
	unsigned int flags;
	char *name;
};

struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};
static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
};
static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed simultaneously;
 * may only be used from NMI context.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
#define nmi_to_desc(type) (&nmi_desc[type])

static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list)
		handled += a->handler(type, regs);

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}
static int __setup_nmi(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * Some handlers need to run first, otherwise a fake
	 * event confuses the other handlers (kdump uses this flag).
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
static struct nmiaction *__free_nmi(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * the name passed in to describe the nmi handler
		 * is used as the lookup key
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	synchronize_rcu();
	return n;
}
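/*
 * register_nmi_handler - register a handler to run on NMIs of @type.
 *
 * The handler is appended to the list for @type (or prepended when
 * NMI_FLAG_FIRST is set in @nmiflags) and should return nonzero when it
 * handled the event.  @devname is copied and later serves as the lookup
 * key for unregister_nmi_handler().
 */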
int register_nmi_handler(unsigned int type, nmi_handler_t handler,
			unsigned long nmiflags, const char *devname)
{
	struct nmiaction *action;
	int retval = -ENOMEM;

	if (!handler)
		return -EINVAL;

	action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
	if (!action)
		goto fail_action;

	action->handler = handler;
	action->flags = nmiflags;
	action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
	if (!action->name)
		goto fail_action_name;

	retval = __setup_nmi(type, action);
	if (retval)
		goto fail_setup_nmi;

	return retval;

fail_setup_nmi:
	kfree(action->name);
fail_action_name:
	kfree(action);
fail_action:
	return retval;
}
EXPORT_SYMBOL_GPL(register_nmi_handler);
void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmiaction *a;

	a = __free_nmi(type, name);
	if (a) {
		kfree(a->name);
		kfree(a);
	}
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
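/*
 * Illustrative sketch only, not part of this file: a hypothetical driver
 * ("mydrv", with made-up helper functions) might hook the CPU-local NMI
 * path as shown below.  The handler returns nonzero only for events it
 * owns, since nmi_handle() sums the return values to decide whether the
 * NMI was handled:
 *
 *	static int mydrv_nmi(unsigned int type, struct pt_regs *regs)
 *	{
 *		if (!mydrv_event_pending())
 *			return 0;	(not ours; let other handlers run)
 *		mydrv_ack_event();
 *		return 1;		(event handled)
 *	}
 *
 *	err = register_nmi_handler(NMI_LOCAL, mydrv_nmi, 0, "mydrv");
 *	...
 *	unregister_nmi_handler(NMI_LOCAL, "mydrv");
 */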
static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, PCI SERR line is used to report memory
	 * errors. EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_registers(regs);

	if (panic_on_io_nmi)
		panic("NMI IOCK error: Not continuing");

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}
static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	handled = nmi_handle(NMI_UNKNOWN, regs);
	if (handled)
		return;
#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}
static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;

	/*
	 * CPU-specific NMIs must be processed before non-CPU-specific
	 * ones: a CPU-specific NMI cannot be detected or handled on
	 * another CPU, so deferring it risks losing it.
	 */
	handled = nmi_handle(NMI_LOCAL, regs);
	if (handled)
		return;

	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
	raw_spin_lock(&nmi_reason_lock);
	reason = get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	unknown_nmi_error(reason, regs);
}
dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}
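/*
 * stop_nmi()/restart_nmi() temporarily suppress default NMI processing:
 * while ignore_nmis is nonzero, do_nmi() still counts incoming NMIs but
 * skips default_do_nmi().
 */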
void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}