ip27-irq.c

/*
 * ip27-irq.c: High-level interrupt handling for the IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pci/bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/intr.h>

#undef DEBUG_IRQ
#ifdef DEBUG_IRQ
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs are assumed to be messaging to Pentium local-APICs.)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

extern asmlinkage void ip27_irq(void);

extern struct bridge_controller *irq_to_bridge[];
extern int irq_to_slot[];

/*
 * use these macros to get the encoded nasid and widget id
 * from the irq value
 */
#define IRQ_TO_BRIDGE(i)                irq_to_bridge[(i)]
#define SLOT_FROM_PCI_IRQ(i)            irq_to_slot[i]
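
/*
 * Reserve a free interrupt level on the hub that owns @cpu and record
 * which Linux irq number it was handed out for, so the low-level
 * dispatch code can later map a pending level back to an irq.
 */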
static inline int alloc_level(int cpu, int irq)
{
        struct hub_data *hub = hub_data(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;
        int level;

        level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
        if (level >= LEVELS_PER_SLICE)
                panic("Cpu %d flooded with devices\n", cpu);

        __set_bit(level, hub->irq_alloc_mask);
        si->level_to_irq[level] = irq;

        return level;
}
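
/*
 * Reverse lookup: scan the level_to_irq[] table of every online cpu for
 * the entry that maps to @irq and return the level, with the owning cpu
 * reported through @cpunum.
 */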
static inline int find_level(cpuid_t *cpunum, int irq)
{
        int cpu, i;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                struct slice_data *si = cpu_data[cpu].data;

                if (!cpu_online(cpu))
                        continue;

                for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
                        if (si->level_to_irq[i] == irq) {
                                *cpunum = cpu;
                                return i;
                        }
        }

        panic("Could not identify cpu/level for irq %d\n", irq);
}

/*
 * Find the index of the most significant set bit.
 */
static int ms1bit(unsigned long x)
{
        int b = 0, s;

        s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
        s =  8; if (x >>  8 == 0) s = 0; b += s; x >>= s;
        s =  4; if (x >>  4 == 0) s = 0; b += s; x >>= s;
        s =  2; if (x >>  2 == 0) s = 0; b += s; x >>= s;
        s =  1; if (x >>  1 == 0) s = 0; b += s;

        return b;
}
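
/*
 * Example: ms1bit(0x2040) == 13, the index of the highest pending bit,
 * so the dispatchers below always service the highest-numbered pending
 * level first.
 */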

/*
 * This code is unnecessarily complex, because we do SA_INTERRUPT
 * intr enabling. Basically, once we grab the set of intrs we need
 * to service, we must mask _all_ these interrupts; firstly, to make
 * sure the same intr does not intr again, causing recursion that
 * can lead to stack overflow. Secondly, we can not just mask the
 * one intr we are do_IRQing, because the non-masked intrs in the
 * first set might intr again, causing multiple servicings of the
 * same intr. This effect is mostly seen for intercpu intrs.
 * Kanoj 05.13.00
 */
void ip27_do_irq_mask0(struct pt_regs *regs)
{
        int irq, swlevel;
        hubreg_t pend0, mask0;
        cpuid_t cpu = smp_processor_id();
        int pi_int_mask0 =
                (cputoslice(cpu) == 0) ? PI_INT_MASK0_A : PI_INT_MASK0_B;

        /* copied from Irix intpend0() */
        pend0 = LOCAL_HUB_L(PI_INT_PEND0);
        mask0 = LOCAL_HUB_L(pi_int_mask0);
        pend0 &= mask0;         /* Pick intrs we should look at */
        if (!pend0)
                return;

        swlevel = ms1bit(pend0);
#ifdef CONFIG_SMP
        if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
        } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
        } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
                smp_call_function_interrupt();
        } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
                smp_call_function_interrupt();
        } else
#endif
        {
                /* "map" swlevel to irq */
                struct slice_data *si = cpu_data[cpu].data;

                irq = si->level_to_irq[swlevel];
                do_IRQ(irq, regs);
        }
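
        /*
         * Re-read the pending register; the value is discarded, the read
         * presumably just makes sure the earlier hub writes have been
         * flushed before we return.
         */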
        LOCAL_HUB_L(PI_INT_PEND0);
}

void ip27_do_irq_mask1(struct pt_regs *regs)
{
        int irq, swlevel;
        hubreg_t pend1, mask1;
        cpuid_t cpu = smp_processor_id();
        int pi_int_mask1 = (cputoslice(cpu) == 0) ? PI_INT_MASK1_A : PI_INT_MASK1_B;
        struct slice_data *si = cpu_data[cpu].data;

        /* copied from Irix intpend0() */
        pend1 = LOCAL_HUB_L(PI_INT_PEND1);
        mask1 = LOCAL_HUB_L(pi_int_mask1);
        pend1 &= mask1;         /* Pick intrs we should look at */
        if (!pend1)
                return;

        swlevel = ms1bit(pend1);
        /* "map" swlevel to irq */
        irq = si->level_to_irq[swlevel];
        LOCAL_HUB_CLR_INTR(swlevel);
        do_IRQ(irq, regs);

        LOCAL_HUB_L(PI_INT_PEND1);
}

void ip27_prof_timer(struct pt_regs *regs)
{
        panic("CPU %d got a profiling interrupt", smp_processor_id());
}

void ip27_hub_error(struct pt_regs *regs)
{
        panic("CPU %d got a hub error interrupt", smp_processor_id());
}
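
/*
 * Enable (connect) or disable (disconnect) an interrupt level in the
 * per-slice copy of the hub's PI_INT_MASK registers; the A or B register
 * pair is chosen according to which slice of the node @cpu is.
 */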
static int intr_connect_level(int cpu, int bit)
{
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;
        unsigned long flags;

        set_bit(bit, si->irq_enable_mask);

        local_irq_save(flags);
        if (!cputoslice(cpu)) {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }
        local_irq_restore(flags);

        return 0;
}

static int intr_disconnect_level(int cpu, int bit)
{
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;

        clear_bit(bit, si->irq_enable_mask);

        if (!cputoslice(cpu)) {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }

        return 0;
}

/* Startup one of the (PCI ...) IRQs routed over a bridge. */
static unsigned int startup_bridge_irq(unsigned int irq)
{
        struct bridge_controller *bc;
        bridgereg_t device;
        bridge_t *bridge;
        int pin, swlevel;
        cpuid_t cpu;

        pin = SLOT_FROM_PCI_IRQ(irq);
        bc = IRQ_TO_BRIDGE(irq);
        bridge = bc->base;

        DBG("bridge_startup(): irq= 0x%x pin=%d\n", irq, pin);
        /*
         * "map" irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken
         */
        swlevel = find_level(&cpu, irq);
        bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8));
        bridge->b_int_enable |= (1 << pin);
        bridge->b_int_enable |= 0x7ffffe00;     /* more stuff in int_enable */

        /*
         * Enable sending of an interrupt clear packet to the hub on a high to
         * low transition of the interrupt pin.
         *
         * IRIX sets additional bits in the address which are documented as
         * reserved in the bridge docs.
         */
        bridge->b_int_mode |= (1UL << pin);

        /*
         * We assume the bridge to have a 1:1 mapping between devices
         * (slots) and intr pins.
         */
        device = bridge->b_int_device;
        device &= ~(7 << (pin*3));
        device |= (pin << (pin*3));
        bridge->b_int_device = device;
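
        /*
         * Read the widget's target flush register; the value is ignored,
         * the read waits for the earlier posted writes to the bridge to
         * complete.
         */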
        bridge->b_wid_tflush;

        return 0;       /* Never anything pending. */
}

/* Shutdown one of the (PCI ...) IRQs routed over a bridge. */
static void shutdown_bridge_irq(unsigned int irq)
{
        struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
        struct hub_data *hub = hub_data(cpu_to_node(bc->irq_cpu));
        bridge_t *bridge = bc->base;
        struct slice_data *si = cpu_data[bc->irq_cpu].data;
        int pin, swlevel;
        cpuid_t cpu;

        DBG("bridge_shutdown: irq 0x%x\n", irq);
        pin = SLOT_FROM_PCI_IRQ(irq);

        /*
         * map irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken
         */
        swlevel = find_level(&cpu, irq);
        intr_disconnect_level(cpu, swlevel);
        __clear_bit(swlevel, hub->irq_alloc_mask);
        si->level_to_irq[swlevel] = -1;

        bridge->b_int_enable &= ~(1 << pin);
        bridge->b_wid_tflush;
}

static inline void enable_bridge_irq(unsigned int irq)
{
        cpuid_t cpu;
        int swlevel;

        swlevel = find_level(&cpu, irq);        /* Criminal offence */
        intr_connect_level(cpu, swlevel);
}

static inline void disable_bridge_irq(unsigned int irq)
{
        cpuid_t cpu;
        int swlevel;

        swlevel = find_level(&cpu, irq);        /* Criminal offence */
        intr_disconnect_level(cpu, swlevel);
}

static void mask_and_ack_bridge_irq(unsigned int irq)
{
        disable_bridge_irq(irq);
}

static void end_bridge_irq(unsigned int irq)
{
        if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
            irq_desc[irq].action)
                enable_bridge_irq(irq);
}
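
/*
 * The 'controller-template' (see the comment at the top of the file) used
 * by the generic irq layer for all interrupts routed through a bridge.
 */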
static struct hw_interrupt_type bridge_irq_type = {
        .typename       = "bridge",
        .startup        = startup_bridge_irq,
        .shutdown       = shutdown_bridge_irq,
        .enable         = enable_bridge_irq,
        .disable        = disable_bridge_irq,
        .ack            = mask_and_ack_bridge_irq,
        .end            = end_bridge_irq,
};
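
/*
 * Bitmap of irq numbers already handed out.  allocate_irqno() and
 * free_irqno() manage it with atomic bit operations, so concurrent
 * callers can never be given the same irq number.
 */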
static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];

static int allocate_irqno(void)
{
        int irq;

again:
        irq = find_first_zero_bit(irq_map, NR_IRQS);
        if (irq >= NR_IRQS)
                return -ENOSPC;

        if (test_and_set_bit(irq, irq_map))
                goto again;

        return irq;
}

void free_irqno(unsigned int irq)
{
        clear_bit(irq, irq_map);
}
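
/*
 * Hook a freshly allocated irq number up to the bridge irq controller
 * template so that the generic irq code uses the routines above for it.
 */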
void __devinit register_bridge_irq(unsigned int irq)
{
        irq_desc[irq].status    = IRQ_DISABLED;
        irq_desc[irq].action    = 0;
        irq_desc[irq].depth     = 1;
        irq_desc[irq].handler   = &bridge_irq_type;
}

int __devinit request_bridge_irq(struct bridge_controller *bc)
{
        int irq = allocate_irqno();
        int swlevel, cpu;
        nasid_t nasid;

        if (irq < 0)
                return irq;

        /*
         * "map" irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken
         */
        cpu = bc->irq_cpu;
        swlevel = alloc_level(cpu, irq);
        if (unlikely(swlevel < 0)) {
                free_irqno(irq);
                return -EAGAIN;
        }

        /* Make sure it's not already pending when we connect it. */
        nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        REMOTE_HUB_CLR_INTR(nasid, swlevel);

        intr_connect_level(cpu, swlevel);

        register_bridge_irq(irq);

        return irq;
}
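
/*
 * Exception vector 0 is the interrupt exception; point it at the IP27
 * interrupt dispatcher.
 */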
void __init arch_init_irq(void)
{
        set_except_vector(0, ip27_irq);
}
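
/*
 * Runs on the cpu whose IPIs are being set up: reserve and enable this
 * slice's resched and call-function interrupt levels and unmask them in
 * the slice's PI_INT_MASK registers.
 */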
void install_ipi(void)
{
        int slice = LOCAL_HUB_L(PI_CPU_NUM);
        int cpu = smp_processor_id();
        struct slice_data *si = cpu_data[cpu].data;
        struct hub_data *hub = hub_data(cpu_to_node(cpu));
        int resched, call;

        resched = CPU_RESCHED_A_IRQ + slice;
        __set_bit(resched, hub->irq_alloc_mask);
        __set_bit(resched, si->irq_enable_mask);
        LOCAL_HUB_CLR_INTR(resched);

        call = CPU_CALL_A_IRQ + slice;
        __set_bit(call, hub->irq_alloc_mask);
        __set_bit(call, si->irq_enable_mask);
        LOCAL_HUB_CLR_INTR(call);

        if (slice == 0) {
                LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
                LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
                LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }
}