/*
 * Cell Internal Interrupt Controller
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>

#include "interrupt.h"
#include "cbe_regs.h"

struct iic {
	struct cbe_iic_thread_regs __iomem *regs;
	u8 target_id;
	u8 eoi_stack[16];
	int eoi_ptr;
};

static DEFINE_PER_CPU(struct iic, iic);
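
/*
 * Per-thread interrupt state: each hardware thread gets one of these
 * through the per-CPU variable above.  iic_get_irq() pushes the
 * priority reported for the interrupt it just fetched onto eoi_stack,
 * and iic_eoi() pops it again, writing the entry underneath back to
 * the priority register so the level in effect before this interrupt
 * is restored.  eoi_stack[0] is primed with 0xff at setup, the same
 * value written to the priority register when a CPU comes up.
 */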

static void iic_mask(unsigned int irq)
{
}

static void iic_unmask(unsigned int irq)
{
}

static void iic_eoi(unsigned int irq)
{
	struct iic *iic = &__get_cpu_var(iic);
	out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
	BUG_ON(iic->eoi_ptr < 0);
}

static struct irq_chip iic_chip = {
	.typename = " CELL-IIC ",
	.mask = iic_mask,
	.unmask = iic_unmask,
	.eoi = iic_eoi,
};

/* XXX All of this has to be reworked completely. We need to assign real
 * interrupt numbers to the external interrupts and remove all the hard coded
 * interrupt maps (rely on the device-tree whenever possible).
 *
 * Basically, my scheme is to define the "pendings" bits to be the HW interrupt
 * number (ignoring the data and flags here). That means we can sort-of split
 * external sources based on priority, and we can use request_irq() on pretty
 * much anything.
 *
 * For spider or axon, they have their own interrupt space. spider will just have
 * local "hardware" interrupts 0...xx * node stride. The node stride is not
 * necessary (separate interrupt chips will have separate HW number spaces), but
 * it allows us to stay compatible with existing device-trees.
 *
 * All of this little world will get a standard remapping scheme to map those HW
 * numbers into the Linux flat irq number space.
 */
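
/*
 * For example, following the mapping below: a class-1 interrupt raised
 * by SPE unit 0x3 on node 1 is reported with source 0x13 and decodes to
 * irq IIC_SPE_OFFSET + 1 * IIC_CLASS_STRIDE + 1 * IIC_NODE_STRIDE + 0x3,
 * while a class-2 interrupt from unit 0x00 or 0x0b on node 0 becomes the
 * external cascade interrupt, IIC_EXT_CASCADE.
 */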
static int iic_external_get_irq(struct cbe_iic_pending_bits pending)
{
	int irq;
	unsigned char node, unit;

	node = pending.source >> 4;
	unit = pending.source & 0xf;
	irq = -1;

	/*
	 * This mapping is specific to the Cell Broadband
	 * Engine. We might need to get the numbers
	 * from the device tree to support future CPUs.
	 */
	switch (unit) {
	case 0x00:
	case 0x0b:
		/*
		 * One of these units can be connected
		 * to an external interrupt controller.
		 */
		if (pending.class != 2)
			break;
		/* TODO: We might want to silently ignore cascade interrupts
		 * when no cascade handler exists yet.
		 */
		irq = IIC_EXT_CASCADE + node * IIC_NODE_STRIDE;
		break;
	case 0x01 ... 0x04:
	case 0x07 ... 0x0a:
		/*
		 * These units are connected to the SPEs
		 */
		if (pending.class > 2)
			break;
		irq = IIC_SPE_OFFSET
			+ pending.class * IIC_CLASS_STRIDE
			+ node * IIC_NODE_STRIDE
			+ unit;
		break;
	}
	if (irq == -1)
		printk(KERN_WARNING "Unexpected interrupt class %02x, "
		       "source %02x, prio %02x, cpu %02x\n", pending.class,
		       pending.source, pending.prio, smp_processor_id());
	return irq;
}
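
/*
 * The pending register is read as one 64-bit quantity and then
 * reinterpreted through the cbe_iic_pending_bits layout, so flags,
 * class, source and priority all come from a single MMIO read.  The
 * reported priority is pushed onto the per-thread eoi_stack here and
 * written back to the priority register by iic_eoi() once the handler
 * has completed.
 */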

/* Get an IRQ number from the pending state register of the IIC */
int iic_get_irq(struct pt_regs *regs)
{
	struct iic *iic;
	int irq;
	struct cbe_iic_pending_bits pending;

	iic = &__get_cpu_var(iic);
	*(unsigned long *) &pending =
		in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
	iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
	BUG_ON(iic->eoi_ptr > 15);

	irq = -1;
	if (pending.flags & CBE_IIC_IRQ_VALID) {
		if (pending.flags & CBE_IIC_IRQ_IPI) {
			irq = IIC_IPI_OFFSET + (pending.prio >> 4);
/*
			if (irq > 0x80)
				printk(KERN_WARNING "Unexpected IPI prio %02x"
				       "on CPU %02x\n", pending.prio,
				       smp_processor_id());
*/
		} else {
			irq = iic_external_get_irq(pending);
		}
	}
	return irq;
}

/* hardcoded part to be compatible with older firmware */

static int __init setup_iic_hardcoded(void)
{
	struct device_node *np;
	int nodeid, cpu;
	unsigned long regs;
	struct iic *iic;

	for_each_possible_cpu(cpu) {
		iic = &per_cpu(iic, cpu);
		nodeid = cpu / 2;

		for (np = of_find_node_by_type(NULL, "cpu");
		     np;
		     np = of_find_node_by_type(np, "cpu")) {
			if (nodeid == *(int *)get_property(np, "node-id", NULL))
				break;
		}
		if (!np) {
			printk(KERN_WARNING "IIC: CPU %d not found\n", cpu);
			iic->regs = NULL;
			iic->target_id = 0xff;
			return -ENODEV;
		}
		regs = *(long *)get_property(np, "iic", NULL);

		/* hack until we have decided on the devtree info */
		regs += 0x400;
		if (cpu & 1)
			regs += 0x20;

		printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs);
		iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs));
		iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
		iic->eoi_stack[0] = 0xff;
	}

	return 0;
}
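
/*
 * As the code below expects it, the firmware provides one
 * "interrupt-controller" node per Cell BE that is compatible with
 * "IBM,CBEA-Internal-Interrupt-Controller".  Its "reg" property holds
 * one address/size pair per hardware thread (hence regs[0] and regs[2])
 * and its "ibm,interrupt-server-ranges" property names the two Linux
 * CPU numbers served by that controller.
 */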
static int __init setup_iic(void)
{
	struct device_node *dn;
	unsigned long *regs;
	char *compatible;
	unsigned *np, found = 0;
	struct iic *iic = NULL;

	for (dn = NULL; (dn = of_find_node_by_name(dn, "interrupt-controller"));) {
		compatible = (char *)get_property(dn, "compatible", NULL);

		if (!compatible) {
			printk(KERN_WARNING "no compatible property found !\n");
			continue;
		}

		if (strstr(compatible, "IBM,CBEA-Internal-Interrupt-Controller"))
			regs = (unsigned long *)get_property(dn, "reg", NULL);
		else
			continue;

		if (!regs) {
			printk(KERN_WARNING "IIC: no reg property\n");
			continue;
		}

		np = (unsigned int *)get_property(dn, "ibm,interrupt-server-ranges", NULL);
		if (!np) {
			printk(KERN_WARNING "IIC: CPU association not found\n");
			/* invalidate the last controller we set up, if any */
			if (iic) {
				iic->regs = NULL;
				iic->target_id = 0xff;
			}
			return -ENODEV;
		}

		iic = &per_cpu(iic, np[0]);
		iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs));
		iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe);
		iic->eoi_stack[0] = 0xff;
		printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs);

		iic = &per_cpu(iic, np[1]);
		iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs));
		iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe);
		iic->eoi_stack[0] = 0xff;
		printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs);

		found++;
	}

	if (found)
		return 0;
	else
		return -ENODEV;
}

#ifdef CONFIG_SMP

/* Use the highest interrupt priorities for IPI */
static inline int iic_ipi_to_irq(int ipi)
{
	return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi;
}

static inline int iic_irq_to_ipi(int irq)
{
	return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET);
}
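
/*
 * IPI delivery path: iic_cause_IPI() writes the IPI's priority value,
 * (IIC_NUM_IPIS - 1 - mesg) << 4, into the target thread's "generate"
 * register.  The interrupt then shows up on that thread with
 * CBE_IIC_IRQ_IPI set and the same value in pending.prio, which
 * iic_get_irq() shifts back down (pending.prio >> 4) to recover the
 * offset from IIC_IPI_OFFSET, matching iic_ipi_to_irq()/iic_irq_to_ipi()
 * above.
 */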

void iic_setup_cpu(void)
{
	out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
}

void iic_cause_IPI(int cpu, int mesg)
{
	out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4);
}
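
/*
 * The target id identifies this thread as an interrupt destination:
 * the node number goes in the upper nibble and the thread's unit id
 * (0xe or 0xf, apparently the two PPE hardware threads) in the lower
 * nibble, mirroring how setup_iic() and setup_iic_hardcoded() fill it
 * in.  Other code can use it to route interrupts to a specific thread.
 */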
u8 iic_get_target_id(int cpu)
{
	return per_cpu(iic, cpu).target_id;
}
EXPORT_SYMBOL_GPL(iic_get_target_id);

static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
	smp_message_recv(iic_irq_to_ipi(irq), regs);
	return IRQ_HANDLED;
}

static void iic_request_ipi(int ipi, const char *name)
{
	int irq;

	irq = iic_ipi_to_irq(ipi);
	/* IPIs are marked IRQF_DISABLED as they must run with irqs
	 * disabled */
	set_irq_chip_and_handler(irq, &iic_chip, handle_percpu_irq);
	request_irq(irq, iic_ipi_action, IRQF_DISABLED, name, NULL);
}

void iic_request_IPIs(void)
{
	iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
	iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
#ifdef CONFIG_DEBUGGER
	iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
#endif /* CONFIG_DEBUGGER */
}

#endif /* CONFIG_SMP */

static void __init iic_setup_builtin_handlers(void)
{
	int be, isrc;

	/* XXX FIXME: Assume two threads per BE are present */
	for (be = 0; be < num_present_cpus() / 2; be++) {
		int irq;

		/* setup SPE chip and handlers */
		for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) {
			irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc;
			set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq);
		}
		/* setup cascade chip */
		irq = IIC_EXT_CASCADE + be * IIC_NODE_STRIDE;
		set_irq_chip_and_handler(irq, &iic_chip, handle_fasteoi_irq);
	}
}
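
/*
 * Top-level init: probe the IIC nodes from the device tree, fall back
 * to the hardcoded layout for older firmware, then write 0xff (the
 * same value kept at the bottom of eoi_stack) to each thread's
 * priority register before wiring up the built-in SPE and cascade
 * handlers.
 */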
void __init iic_init_IRQ(void)
{
	int cpu, irq_offset;
	struct iic *iic;

	if (setup_iic() < 0)
		setup_iic_hardcoded();

	irq_offset = 0;
	for_each_possible_cpu(cpu) {
		iic = &per_cpu(iic, cpu);
		if (iic->regs)
			out_be64(&iic->regs->prio, 0xff);
	}
	iic_setup_builtin_handlers();
}