ip27-irq.c

/*
 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */
#undef DEBUG

#include <linux/config.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pci/bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/intr.h>

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing.  Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller.  Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

extern asmlinkage void ip27_irq(void);

extern struct bridge_controller *irq_to_bridge[];
extern int irq_to_slot[];

/*
 * use these macros to get the encoded nasid and widget id
 * from the irq value
 */
#define IRQ_TO_BRIDGE(i)        irq_to_bridge[(i)]
#define SLOT_FROM_PCI_IRQ(i)    irq_to_slot[i]

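/*
 * Allocate a free interrupt level on the node that owns @cpu and record
 * the level -> irq mapping in that CPU's slice data.  Panics when all
 * LEVELS_PER_SLICE levels are already in use.
 */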
static inline int alloc_level(int cpu, int irq)
{
        struct hub_data *hub = hub_data(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;
        int level;

        level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
        if (level >= LEVELS_PER_SLICE)
                panic("Cpu %d flooded with devices\n", cpu);

        __set_bit(level, hub->irq_alloc_mask);
        si->level_to_irq[level] = irq;

        return level;
}

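/*
 * Reverse lookup: scan the slice data of every online CPU for the level
 * that was assigned to @irq, store the owning CPU in *cpunum and return
 * the level.  Panics if the irq was never mapped to a level.
 */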
static inline int find_level(cpuid_t *cpunum, int irq)
{
        int cpu, i;

        for_each_online_cpu(cpu) {
                struct slice_data *si = cpu_data[cpu].data;

                for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
                        if (si->level_to_irq[i] == irq) {
                                *cpunum = cpu;

                                return i;
                        }
        }

        panic("Could not identify cpu/level for irq %d\n", irq);
}

/*
 * Find the position of the most significant set bit.
 */
static int ms1bit(unsigned long x)
{
        int b = 0, s;

        s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
        s =  8; if (x >>  8 == 0) s = 0; b += s; x >>= s;
        s =  4; if (x >>  4 == 0) s = 0; b += s; x >>= s;
        s =  2; if (x >>  2 == 0) s = 0; b += s; x >>= s;
        s =  1; if (x >>  1 == 0) s = 0; b += s;

        return b;
}

/*
 * This code is unnecessarily complex, because we do SA_INTERRUPT
 * intr enabling. Basically, once we grab the set of intrs we need
 * to service, we must mask _all_ these interrupts; firstly, to make
 * sure the same intr does not intr again, causing recursion that
 * can lead to stack overflow. Secondly, we can not just mask the
 * one intr we are do_IRQing, because the non-masked intrs in the
 * first set might intr again, causing multiple servicings of the
 * same intr. This effect is mostly seen for intercpu intrs.
 * Kanoj 05.13.00
 */
static void ip27_do_irq_mask0(struct pt_regs *regs)
{
        int irq, swlevel;
        hubreg_t pend0, mask0;
        cpuid_t cpu = smp_processor_id();
        int pi_int_mask0 =
                (cputoslice(cpu) == 0) ? PI_INT_MASK0_A : PI_INT_MASK0_B;

        /* copied from Irix intpend0() */
        pend0 = LOCAL_HUB_L(PI_INT_PEND0);
        mask0 = LOCAL_HUB_L(pi_int_mask0);

        pend0 &= mask0;         /* Pick intrs we should look at */
        if (!pend0)
                return;

        swlevel = ms1bit(pend0);
#ifdef CONFIG_SMP
        if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
        } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
        } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
                smp_call_function_interrupt();
        } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
                smp_call_function_interrupt();
        } else
#endif
        {
                /* "map" swlevel to irq */
                struct slice_data *si = cpu_data[cpu].data;

                irq = si->level_to_irq[swlevel];
                do_IRQ(irq, regs);
        }

        LOCAL_HUB_L(PI_INT_PEND0);
}

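/*
 * Handle an interrupt pending in PI_INT_PEND1: clear it in the hub and
 * dispatch the Linux irq it was mapped to via level_to_irq.
 */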
static void ip27_do_irq_mask1(struct pt_regs *regs)
{
        int irq, swlevel;
        hubreg_t pend1, mask1;
        cpuid_t cpu = smp_processor_id();
        int pi_int_mask1 = (cputoslice(cpu) == 0) ? PI_INT_MASK1_A : PI_INT_MASK1_B;
        struct slice_data *si = cpu_data[cpu].data;

        /* copied from Irix intpend0() */
        pend1 = LOCAL_HUB_L(PI_INT_PEND1);
        mask1 = LOCAL_HUB_L(pi_int_mask1);

        pend1 &= mask1;         /* Pick intrs we should look at */
        if (!pend1)
                return;

        swlevel = ms1bit(pend1);
        /* "map" swlevel to irq */
        irq = si->level_to_irq[swlevel];
        LOCAL_HUB_CLR_INTR(swlevel);
        do_IRQ(irq, regs);

        LOCAL_HUB_L(PI_INT_PEND1);
}

static void ip27_prof_timer(struct pt_regs *regs)
{
        panic("CPU %d got a profiling interrupt", smp_processor_id());
}

static void ip27_hub_error(struct pt_regs *regs)
{
        panic("CPU %d got a hub error interrupt", smp_processor_id());
}

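/*
 * Enable interrupt level @bit for @cpu: set it in the slice's software
 * enable mask and write the whole mask out to the hub's PI_INT_MASK0/1
 * register pair for that CPU's slice (A or B).
 */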
static int intr_connect_level(int cpu, int bit)
{
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;
        unsigned long flags;

        set_bit(bit, si->irq_enable_mask);

        local_irq_save(flags);
        if (!cputoslice(cpu)) {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }
        local_irq_restore(flags);

        return 0;
}

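/*
 * The inverse of intr_connect_level(): clear @bit in the slice's enable
 * mask and write the updated mask back to the hub.
 */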
static int intr_disconnect_level(int cpu, int bit)
{
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;

        clear_bit(bit, si->irq_enable_mask);

        if (!cputoslice(cpu)) {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }

        return 0;
}

/* Startup one of the (PCI ...) IRQs routed over a bridge. */
static unsigned int startup_bridge_irq(unsigned int irq)
{
        struct bridge_controller *bc;
        bridgereg_t device;
        bridge_t *bridge;
        int pin, swlevel;
        cpuid_t cpu;

        pin = SLOT_FROM_PCI_IRQ(irq);
        bc = IRQ_TO_BRIDGE(irq);
        bridge = bc->base;

        pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", irq, pin);
        /*
         * "map" irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken
         */
        swlevel = find_level(&cpu, irq);
        bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8));
        bridge->b_int_enable |= (1 << pin);
        bridge->b_int_enable |= 0x7ffffe00;     /* more stuff in int_enable */

        /*
         * Enable sending of an interrupt clear packet to the hub on a high to
         * low transition of the interrupt pin.
         *
         * IRIX sets additional bits in the address which are documented as
         * reserved in the bridge docs.
         */
        bridge->b_int_mode |= (1UL << pin);

        /*
         * We assume the bridge to have a 1:1 mapping between devices
         * (slots) and intr pins.
         */
        device = bridge->b_int_device;
        device &= ~(7 << (pin*3));
        device |= (pin << (pin*3));
        bridge->b_int_device = device;

        bridge->b_wid_tflush;

        return 0;       /* Never anything pending. */
}

/* Shutdown one of the (PCI ...) IRQs routed over a bridge. */
static void shutdown_bridge_irq(unsigned int irq)
{
        struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
        struct hub_data *hub = hub_data(cpu_to_node(bc->irq_cpu));
        bridge_t *bridge = bc->base;
        struct slice_data *si = cpu_data[bc->irq_cpu].data;
        int pin, swlevel;
        cpuid_t cpu;

        pr_debug("bridge_shutdown: irq 0x%x\n", irq);
        pin = SLOT_FROM_PCI_IRQ(irq);

        /*
         * map irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken
         */
        swlevel = find_level(&cpu, irq);
        intr_disconnect_level(cpu, swlevel);

        __clear_bit(swlevel, hub->irq_alloc_mask);
        si->level_to_irq[swlevel] = -1;

        bridge->b_int_enable &= ~(1 << pin);
        bridge->b_wid_tflush;
}

static inline void enable_bridge_irq(unsigned int irq)
{
        cpuid_t cpu;
        int swlevel;

        swlevel = find_level(&cpu, irq);        /* Criminal offence */
        intr_connect_level(cpu, swlevel);
}

static inline void disable_bridge_irq(unsigned int irq)
{
        cpuid_t cpu;
        int swlevel;

        swlevel = find_level(&cpu, irq);        /* Criminal offence */
        intr_disconnect_level(cpu, swlevel);
}

static void mask_and_ack_bridge_irq(unsigned int irq)
{
        disable_bridge_irq(irq);
}

static void end_bridge_irq(unsigned int irq)
{
        if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
            irq_desc[irq].action)
                enable_bridge_irq(irq);
}

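/*
 * The 'controller-template' the generic irq layer uses for every
 * interrupt routed through a bridge ASIC.
 */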
static struct hw_interrupt_type bridge_irq_type = {
        .typename = "bridge",
        .startup  = startup_bridge_irq,
        .shutdown = shutdown_bridge_irq,
        .enable   = enable_bridge_irq,
        .disable  = disable_bridge_irq,
        .ack      = mask_and_ack_bridge_irq,
        .end      = end_bridge_irq,
};

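/*
 * Bitmap of Linux irq numbers already handed out; allocate_irqno()
 * atomically claims the lowest free one and free_irqno() releases it.
 */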
static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];

static int allocate_irqno(void)
{
        int irq;

again:
        irq = find_first_zero_bit(irq_map, NR_IRQS);
        if (irq >= NR_IRQS)
                return -ENOSPC;

        if (test_and_set_bit(irq, irq_map))
                goto again;

        return irq;
}

void free_irqno(unsigned int irq)
{
        clear_bit(irq, irq_map);
}

void __devinit register_bridge_irq(unsigned int irq)
{
        irq_desc[irq].status  = IRQ_DISABLED;
        irq_desc[irq].action  = 0;
        irq_desc[irq].depth   = 1;
        irq_desc[irq].handler = &bridge_irq_type;
}

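/*
 * Allocate a Linux irq number and a hub level for a bridge, clear any
 * stale pending state, enable the level and register the bridge irq
 * type.  Returns the irq number or a negative errno.
 */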
int __devinit request_bridge_irq(struct bridge_controller *bc)
{
        int irq = allocate_irqno();
        int swlevel, cpu;
        nasid_t nasid;

        if (irq < 0)
                return irq;

        /*
         * "map" irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken
         */
        cpu = bc->irq_cpu;
        swlevel = alloc_level(cpu, irq);
        if (unlikely(swlevel < 0)) {
                free_irqno(irq);

                return -EAGAIN;
        }

        /* Make sure it's not already pending when we connect it. */
        nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        REMOTE_HUB_CLR_INTR(nasid, swlevel);

        intr_connect_level(cpu, swlevel);

        register_bridge_irq(irq);

        return irq;
}

extern void ip27_rt_timer_interrupt(struct pt_regs *regs);

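/*
 * Top-level dispatch: decode the pending CP0 cause bits and hand each
 * CPU interrupt line off to its handler (RT timer, hub PEND0/PEND1,
 * profiling timer or hub error).
 */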
asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
{
        unsigned long pending = read_c0_cause() & read_c0_status();

        if (pending & CAUSEF_IP4)
                ip27_rt_timer_interrupt(regs);
        else if (pending & CAUSEF_IP2)  /* PI_INT_PEND_0 or CC_PEND_{A|B} */
                ip27_do_irq_mask0(regs);
        else if (pending & CAUSEF_IP3)  /* PI_INT_PEND_1 */
                ip27_do_irq_mask1(regs);
        else if (pending & CAUSEF_IP5)
                ip27_prof_timer(regs);
        else if (pending & CAUSEF_IP6)
                ip27_hub_error(regs);
}

void __init arch_init_irq(void)
{
}

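/*
 * Allocate and unmask the per-slice RESCHED and CALL IPI levels for the
 * calling CPU so it can receive inter-processor interrupts.
 */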
void install_ipi(void)
{
        int slice = LOCAL_HUB_L(PI_CPU_NUM);
        int cpu = smp_processor_id();
        struct slice_data *si = cpu_data[cpu].data;
        struct hub_data *hub = hub_data(cpu_to_node(cpu));
        int resched, call;

        resched = CPU_RESCHED_A_IRQ + slice;
        __set_bit(resched, hub->irq_alloc_mask);
        __set_bit(resched, si->irq_enable_mask);
        LOCAL_HUB_CLR_INTR(resched);

        call = CPU_CALL_A_IRQ + slice;
        __set_bit(call, hub->irq_alloc_mask);
        __set_bit(call, si->irq_enable_mask);
        LOCAL_HUB_CLR_INTR(call);

        if (slice == 0) {
                LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
                LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
                LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }
}