/*
 * ip27-nmi.c — NMI (non-maskable interrupt) handling for SGI IP27
 * (Origin 200/2000) systems: install per-slice NMI handlers and dump
 * cpu register state saved by the IP27 PROM.
 */
  1. #include <linux/kallsyms.h>
  2. #include <linux/kernel.h>
  3. #include <linux/mmzone.h>
  4. #include <linux/nodemask.h>
  5. #include <linux/spinlock.h>
  6. #include <linux/smp.h>
  7. #include <asm/atomic.h>
  8. #include <asm/sn/types.h>
  9. #include <asm/sn/addrs.h>
  10. #include <asm/sn/nmi.h>
  11. #include <asm/sn/arch.h>
  12. #include <asm/sn/sn0/hub.h>
/*
 * Number of cpus on a node.  The per-node CNODE_NUM_CPUS() variant is
 * disabled; the fixed CPUS_PER_NODE constant is used instead.
 */
#if 0
#define NODE_NUM_CPUS(n) CNODE_NUM_CPUS(n)
#else
#define NODE_NUM_CPUS(n) CPUS_PER_NODE
#endif

/* Sentinel for "no such compact node id". */
#define CNODEID_NONE (cnodeid_t)-1

/*
 * Serialise NMI processing: the first cpu to take nmi_lock proceeds with
 * the dump; the others spin here.  Note the lock is never released.
 */
#define enter_panic_mode() spin_lock(&nmi_lock)

typedef unsigned long machreg_t;

/* Protects the NMI dump path so only one cpu performs it. */
DEFINE_SPINLOCK(nmi_lock);
  22. /*
  23. * Lets see what else we need to do here. Set up sp, gp?
  24. */
  25. void nmi_dump(void)
  26. {
  27. void cont_nmi_dump(void);
  28. cont_nmi_dump();
  29. }
/*
 * Register nmi_dump() as the NMI callback for one cpu slice of the
 * local node, via the per-slice nmi_t handshake area shared with the
 * IP27 PROM.
 */
void install_cpu_nmi_handler(int slice)
{
	nmi_t *nmi_addr;

	/* Per-slice NMI area on the local node (NMI_ADDR is a fixed map). */
	nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);

	/* A handler is already installed -- don't overwrite it. */
	if (nmi_addr->call_addr)
		return;

	nmi_addr->magic = NMI_MAGIC;
	nmi_addr->call_addr = (void *)nmi_dump;
	/*
	 * call_addr_c is the bitwise complement of call_addr -- presumably
	 * a consistency check the PROM performs before trusting the entry;
	 * TODO confirm against IP27 PROM documentation.
	 */
	nmi_addr->call_addr_c =
		(void *)(~((unsigned long)(nmi_addr->call_addr)));
	nmi_addr->call_parm = 0;
}
  42. /*
  43. * Copy the cpu registers which have been saved in the IP27prom format
  44. * into the eframe format for the node under consideration.
  45. */
/*
 * Dump the register set that the IP27 PROM saved for cpu slice @slice
 * on node @nasid when the NMI was taken: the 32 GPRs, then a selection
 * of decoded CP0 registers.
 */
void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
	struct reg_struct *nr;
	int i;

	/* Get the pointer to the current cpu's register set. */
	/* Uncached view of the per-slice save area in the node's memory. */
	nr = (struct reg_struct *)
		(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
		slice * IP27_NMI_KREGS_CPU_SIZE);

	printk("NMI nasid %d: slice %d\n", nasid, slice);

	/*
	 * Saved main processor registers, four per output line.
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d :", i);
		printk(" %016lx", nr->gpr[i]);
		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

	/* Hi/Lo are not part of the PROM save area. */
	printk("Hi : (value lost)\n");
	printk("Lo : (value lost)\n");

	/*
	 * Saved cp0 registers
	 */
	printk("epc : %016lx ", nr->epc);
	print_symbol("%s ", nr->epc);
	printk("%s\n", print_tainted());
	printk("ErrEPC: %016lx ", nr->error_epc);
	print_symbol("%s\n", nr->error_epc);
	printk("ra : %016lx ", nr->gpr[31]);
	print_symbol("%s\n", nr->gpr[31]);

	/* Decode the Status register bit by bit. */
	printk("Status: %08lx ", nr->sr);

	if (nr->sr & ST0_KX)
		printk("KX ");
	if (nr->sr & ST0_SX)
		printk("SX ");
	if (nr->sr & ST0_UX)
		printk("UX ");

	/* Processor mode at the time of the NMI. */
	switch (nr->sr & ST0_KSU) {
	case KSU_USER:
		printk("USER ");
		break;
	case KSU_SUPERVISOR:
		printk("SUPERVISOR ");
		break;
	case KSU_KERNEL:
		printk("KERNEL ");
		break;
	default:
		printk("BAD_MODE ");
		break;
	}

	if (nr->sr & ST0_ERL)
		printk("ERL ");
	if (nr->sr & ST0_EXL)
		printk("EXL ");
	if (nr->sr & ST0_IE)
		printk("IE ");
	printk("\n");

	printk("Cause : %08lx\n", nr->cause);
	/* PrId is read from the cpu running the dump, not the saved state. */
	printk("PrId : %08x\n", read_c0_prid());
	printk("BadVA : %016lx\n", nr->badva);
	printk("CErr : %016lx\n", nr->cache_err);
	printk("NMI_SR: %016lx\n", nr->nmi_sr);

	printk("\n");
}
  113. void nmi_dump_hub_irq(nasid_t nasid, int slice)
  114. {
  115. hubreg_t mask0, mask1, pend0, pend1;
  116. if (slice == 0) { /* Slice A */
  117. mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
  118. mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
  119. } else { /* Slice B */
  120. mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
  121. mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
  122. }
  123. pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
  124. pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);
  125. printk("PI_INT_MASK0: %16lx PI_INT_MASK1: %16lx\n", mask0, mask1);
  126. printk("PI_INT_PEND0: %16lx PI_INT_PEND1: %16lx\n", pend0, pend1);
  127. printk("\n\n");
  128. }
  129. /*
  130. * Copy the cpu registers which have been saved in the IP27prom format
  131. * into the eframe format for the node under consideration.
  132. */
  133. void nmi_node_eframe_save(cnodeid_t cnode)
  134. {
  135. nasid_t nasid;
  136. int slice;
  137. /* Make sure that we have a valid node */
  138. if (cnode == CNODEID_NONE)
  139. return;
  140. nasid = COMPACT_TO_NASID_NODEID(cnode);
  141. if (nasid == INVALID_NASID)
  142. return;
  143. /* Save the registers into eframe for each cpu */
  144. for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) {
  145. nmi_cpu_eframe_save(nasid, slice);
  146. nmi_dump_hub_irq(nasid, slice);
  147. }
  148. }
  149. /*
  150. * Save the nmi cpu registers for all cpus in the system.
  151. */
  152. void
  153. nmi_eframes_save(void)
  154. {
  155. cnodeid_t cnode;
  156. for_each_online_node(cnode)
  157. nmi_node_eframe_save(cnode);
  158. }
/*
 * Continuation of nmi_dump(): serialise the cpus, wait for all of them
 * to arrive in the NMI handler, dump every cpu's saved state, then
 * reset the local hub's network port (which resets the machine).
 */
void
cont_nmi_dump(void)
{
#ifndef REAL_NMI_SIGNAL
	/* Count of cpus that have entered the NMI path so far. */
	static atomic_t nmied_cpus = ATOMIC_INIT(0);
	atomic_inc(&nmied_cpus);
#endif
	/*
	 * Use enter_panic_mode to allow only 1 cpu to proceed
	 * (the spinlock is taken and never released -- all other cpus
	 * spin here forever).
	 */
	enter_panic_mode();
#ifdef REAL_NMI_SIGNAL
	/*
	 * Wait up to 15 seconds for the other cpus to respond to the NMI.
	 * If a cpu has not responded after 10 sec, send it 1 additional NMI.
	 * This is for 2 reasons:
	 * - sometimes a MMSC fail to NMI all cpus.
	 * - on 512p SN0 system, the MMSC will only send NMIs to
	 * half the cpus. Unfortunately, we don't know which cpus may be
	 * NMIed - it depends on how the site chooses to configure.
	 *
	 * Note: it has been measure that it takes the MMSC up to 2.3 secs to
	 * send NMIs to all cpus on a 256p system.
	 *
	 * NOTE(review): this branch is dead code -- REAL_NMI_SIGNAL is not
	 * defined anywhere visible, and the loop below uses `i', `node',
	 * `cpu' and `n' without declaring them, so it would not compile if
	 * the macro were ever enabled.
	 */
	for (i=0; i < 1500; i++) {
		for_each_online_node(node)
			if (NODEPDA(node)->dump_count == 0)
				break;
		if (node == MAX_NUMNODES)
			break;
		if (i == 1000) {
			for_each_online_node(node)
				if (NODEPDA(node)->dump_count == 0) {
					cpu = node_to_first_cpu(node);
					for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
						CPUMASK_SETB(nmied_cpus, cpu);
						/*
						 * cputonasid, cputoslice
						 * needs kernel cpuid
						 */
						SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
					}
				}
		}
		udelay(10000);
	}
#else
	/* Busy-wait until every online cpu has entered the NMI path. */
	while (atomic_read(&nmied_cpus) != num_online_cpus());
#endif
	/*
	 * Save the nmi cpu registers for all cpu in the eframe format.
	 */
	nmi_eframes_save();
	/* Reset the local hub's network port -- this resets the system. */
	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
}