interrupt.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430
  1. /*
  2. * Cell Internal Interrupt Controller
  3. *
  4. * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
  5. * IBM, Corp.
  6. *
  7. * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
  8. *
  9. * Author: Arnd Bergmann <arndb@de.ibm.com>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2, or (at your option)
  14. * any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; if not, write to the Free Software
  23. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  24. *
  25. * TODO:
  26. * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
  27. * vs node numbers in the setup code
  28. * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
  29. * a non-active node to the active node)
  30. */
  31. #include <linux/interrupt.h>
  32. #include <linux/irq.h>
  33. #include <linux/module.h>
  34. #include <linux/percpu.h>
  35. #include <linux/types.h>
  36. #include <linux/ioport.h>
  37. #include <linux/kernel_stat.h>
  38. #include <asm/io.h>
  39. #include <asm/pgtable.h>
  40. #include <asm/prom.h>
  41. #include <asm/ptrace.h>
  42. #include <asm/machdep.h>
  43. #include <asm/cell-regs.h>
  44. #include "interrupt.h"
/*
 * Per-thread state of one internal interrupt controller. There is one
 * instance per CPU thread (see the cpu_iic per-cpu variable below).
 */
struct iic {
	struct cbe_iic_thread_regs __iomem *regs;	/* mapped HW registers (ioremap'd in init_one_iic) */
	u8 target_id;		/* HW interrupt destination id, computed in init_one_iic() */
	u8 eoi_stack[16];	/* priorities saved in iic_get_irq(), restored in iic_eoi() */
	int eoi_ptr;		/* top-of-stack index into eoi_stack */
	struct device_node *node;	/* OF node of the controller (refcounted) */
};
/* One IIC state structure per CPU thread */
static DEFINE_PER_CPU(struct iic, cpu_iic);

#define IIC_NODE_COUNT	2

/* Single linear irq host covering all IIC interrupt sources */
static struct irq_host *iic_host;
  55. /* Convert between "pending" bits and hw irq number */
  56. static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
  57. {
  58. unsigned char unit = bits.source & 0xf;
  59. unsigned char node = bits.source >> 4;
  60. unsigned char class = bits.class & 3;
  61. /* Decode IPIs */
  62. if (bits.flags & CBE_IIC_IRQ_IPI)
  63. return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
  64. else
  65. return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
  66. }
/* No-op: required by the irq_chip interface, but this driver does not
 * mask individual sources here (EOI/priority handling is used instead).
 */
static void iic_mask(struct irq_data *d)
{
}
/* No-op: counterpart to iic_mask(), see comment there. */
static void iic_unmask(struct irq_data *d)
{
}
  73. static void iic_eoi(struct irq_data *d)
  74. {
  75. struct iic *iic = &__get_cpu_var(cpu_iic);
  76. out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
  77. BUG_ON(iic->eoi_ptr < 0);
  78. }
/* irq_chip used for normal (non-IO-exception) IIC interrupts */
static struct irq_chip iic_chip = {
	.name = "CELL-IIC",
	.irq_mask = iic_mask,		/* no-op */
	.irq_unmask = iic_unmask,	/* no-op */
	.irq_eoi = iic_eoi,		/* restores the saved priority */
};
/* No-op EOI for IO exceptions: acking is done in iic_ioexc_cascade()
 * by writing the iic_is register, so nothing is needed here.
 */
static void iic_ioexc_eoi(struct irq_data *d)
{
}
/*
 * Chained handler for the IO exception cascade interrupt: read the
 * node's interrupt status register (iic_is) and dispatch every set bit
 * to its mapped virtual irq, looping until no bits remain pending.
 *
 * Edge-type bits are acked before handling (a new edge arriving while
 * we handle is not lost); level-type bits are acked after handling.
 */
static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	/* node_iic was stashed via irq_set_handler_data() in setup_iic() */
	struct cbe_iic_regs __iomem *node_iic =
		(void __iomem *)irq_desc_get_handler_data(desc);
	/* NOTE(review): base is derived from the *virtual* irq number —
	 * this relies on the linear map layout keeping node bits in the
	 * virq; verify against the irq_host implementation.
	 */
	unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
	unsigned long bits, ack;
	int cascade;

	for (;;) {
		bits = in_be64(&node_iic->iic_is);
		if (bits == 0)
			break;
		/* pre-ack edge interrupts */
		ack = bits & IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
		/* handle them, MSB (bit 0 in IBM numbering) first */
		for (cascade = 63; cascade >= 0; cascade--)
			if (bits & (0x8000000000000000UL >> cascade)) {
				unsigned int cirq =
					irq_linear_revmap(iic_host,
							  base | cascade);
				if (cirq != NO_IRQ)
					generic_handle_irq(cirq);
			}
		/* post-ack level interrupts */
		ack = bits & ~IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
	}
	/* EOI the cascade interrupt itself on the parent chip */
	chip->irq_eoi(&desc->irq_data);
}
/* irq_chip used for IO exception sources demuxed by iic_ioexc_cascade() */
static struct irq_chip iic_ioexc_chip = {
	.name = "CELL-IOEX",
	.irq_mask = iic_mask,		/* no-op */
	.irq_unmask = iic_unmask,	/* no-op */
	.irq_eoi = iic_ioexc_eoi,	/* no-op: acked in the cascade handler */
};
  126. /* Get an IRQ number from the pending state register of the IIC */
  127. static unsigned int iic_get_irq(void)
  128. {
  129. struct cbe_iic_pending_bits pending;
  130. struct iic *iic;
  131. unsigned int virq;
  132. iic = &__get_cpu_var(cpu_iic);
  133. *(unsigned long *) &pending =
  134. in_be64((u64 __iomem *) &iic->regs->pending_destr);
  135. if (!(pending.flags & CBE_IIC_IRQ_VALID))
  136. return NO_IRQ;
  137. virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
  138. if (virq == NO_IRQ)
  139. return NO_IRQ;
  140. iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
  141. BUG_ON(iic->eoi_ptr > 15);
  142. return virq;
  143. }
/* Enable interrupt delivery on the calling CPU by writing priority 0xff
 * to its prio register (matches the eoi_stack[0] sentinel set in
 * init_one_iic(); presumably 0xff lets all priorities through — verify
 * against the CBEA spec).
 */
void iic_setup_cpu(void)
{
	out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
}
/* Return the HW interrupt destination id for @cpu, as computed in
 * init_one_iic().
 */
u8 iic_get_target_id(int cpu)
{
	return per_cpu(cpu_iic, cpu).target_id;
}
EXPORT_SYMBOL_GPL(iic_get_target_id);
  153. #ifdef CONFIG_SMP
  154. /* Use the highest interrupt priorities for IPI */
  155. static inline int iic_ipi_to_irq(int ipi)
  156. {
  157. return IIC_IRQ_TYPE_IPI + 0xf - ipi;
  158. }
/* Trigger an IPI on @cpu by writing to the target thread's generate
 * register; (0xf - mesg) mirrors the hw number encoding used by
 * iic_ipi_to_irq().
 */
void iic_cause_IPI(int cpu, int mesg)
{
	out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4);
}
/* A single irq host serves all nodes; @node is ignored. */
struct irq_host *iic_get_irq_host(int node)
{
	return iic_host;
}
EXPORT_SYMBOL_GPL(iic_get_irq_host);
/*
 * Interrupt handler for IPIs; dev_id carries the PPC message number
 * that was registered in iic_request_ipi().
 */
static irqreturn_t iic_ipi_action(int irq, void *dev_id)
{
	int ipi = (int)(long)dev_id;

	switch(ipi) {
	case PPC_MSG_CALL_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case PPC_MSG_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case PPC_MSG_DEBUGGER_BREAK:
		debug_ipi_action(0, NULL);
		break;
	}
	/* Unknown messages are silently ignored */
	return IRQ_HANDLED;
}
  187. static void iic_request_ipi(int ipi, const char *name)
  188. {
  189. int virq;
  190. virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi));
  191. if (virq == NO_IRQ) {
  192. printk(KERN_ERR
  193. "iic: failed to map IPI %s\n", name);
  194. return;
  195. }
  196. if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name,
  197. (void *)(long)ipi))
  198. printk(KERN_ERR
  199. "iic: failed to request IPI %s\n", name);
  200. }
/* Map and request all IPI messages used by the powerpc SMP code. */
void iic_request_IPIs(void)
{
	iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
	iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
	iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single");
#ifdef CONFIG_DEBUGGER
	iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
#endif /* CONFIG_DEBUGGER */
}
  210. #endif /* CONFIG_SMP */
/* A device node matches this host iff it is a CBEA internal interrupt
 * controller.
 */
static int iic_host_match(struct irq_host *h, struct device_node *node)
{
	return of_device_is_compatible(node,
				    "IBM,CBEA-Internal-Interrupt-Controller");
}
  216. static int iic_host_map(struct irq_host *h, unsigned int virq,
  217. irq_hw_number_t hw)
  218. {
  219. switch (hw & IIC_IRQ_TYPE_MASK) {
  220. case IIC_IRQ_TYPE_IPI:
  221. irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
  222. break;
  223. case IIC_IRQ_TYPE_IOEXC:
  224. irq_set_chip_and_handler(virq, &iic_ioexc_chip,
  225. handle_edge_eoi_irq);
  226. break;
  227. default:
  228. irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq);
  229. }
  230. return 0;
  231. }
/*
 * Translate a one-cell device-tree interrupt specifier into a hw irq
 * number. The 32-bit cell packs one byte each of node, ext, class and
 * unit (most to least significant).
 */
static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
			  const u32 *intspec, unsigned int intsize,
			  irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	unsigned int node, ext, unit, class;
	const u32 *val;

	if (!of_device_is_compatible(ct,
				    "IBM,CBEA-Internal-Interrupt-Controller"))
		return -ENODEV;
	if (intsize != 1)
		return -ENODEV;
	/* Only single-cell specifiers are supported */
	val = of_get_property(ct, "#interrupt-cells", NULL);
	if (val == NULL || *val != 1)
		return -ENODEV;

	node = intspec[0] >> 24;
	ext = (intspec[0] >> 16) & 0xff;
	class = (intspec[0] >> 8) & 0xff;
	unit = intspec[0] & 0xff;

	/* Check if node is in supported range */
	if (node > 1)
		return -EINVAL;

	/* Build up interrupt number, special case for IO exceptions */
	*out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
	if (unit == IIC_UNIT_IIC && class == 1)
		*out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
	else
		*out_hwirq |= IIC_IRQ_TYPE_NORMAL |
			(class << IIC_IRQ_CLASS_SHIFT) | unit;

	/* Dummy flags, ignored by iic code */
	*out_flags = IRQ_TYPE_EDGE_RISING;

	return 0;
}
/* irq_host callbacks for the IIC linear map */
static struct irq_host_ops iic_host_ops = {
	.match = iic_host_match,
	.map = iic_host_map,
	.xlate = iic_host_xlate,
};
/*
 * Map and initialize the IIC registers of one HW thread: ioremap the
 * register block, compute the thread's interrupt destination id, and
 * write priority 0 (presumably blocking delivery until iic_setup_cpu()
 * raises it to 0xff — verify against the CBEA spec).
 */
static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
				struct device_node *node)
{
	/* XXX FIXME: should locate the linux CPU number from the HW cpu
	 * number properly. We are lucky for now
	 */
	struct iic *iic = &per_cpu(cpu_iic, hw_cpu);

	iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
	BUG_ON(iic->regs == NULL);

	/* Node bit from hw_cpu bit 1, thread id 0xe/0xf from bit 0 —
	 * TODO(review): confirm encoding against the CBEA IIC spec.
	 */
	iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
	iic->eoi_stack[0] = 0xff;	/* bottom-of-stack sentinel priority */
	iic->node = of_node_get(node);
	out_be64(&iic->regs->prio, 0);

	printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
	       hw_cpu, iic->target_id, node->full_name);
}
/*
 * Probe the device tree for CBEA internal interrupt controllers, map
 * the two per-thread register blocks of each, and hook up the IO
 * exception cascade per node.
 *
 * Returns 0 if at least one controller was initialized, -ENODEV
 * otherwise (also on malformed nodes, aborting the whole scan).
 */
static int __init setup_iic(void)
{
	struct device_node *dn;
	struct resource r0, r1;
	unsigned int node, cascade, found = 0;
	struct cbe_iic_regs __iomem *node_iic;
	const u32 *np;

	for (dn = NULL;
	     (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) {
		if (!of_device_is_compatible(dn,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
			continue;
		/* Which two HW threads this controller serves */
		np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
		if (np == NULL) {
			printk(KERN_WARNING "IIC: CPU association not found\n");
			of_node_put(dn);
			return -ENODEV;
		}
		/* One register resource per thread */
		if (of_address_to_resource(dn, 0, &r0) ||
		    of_address_to_resource(dn, 1, &r1)) {
			printk(KERN_WARNING "IIC: Can't resolve addresses\n");
			of_node_put(dn);
			return -ENODEV;
		}
		found++;
		init_one_iic(np[0], r0.start, dn);
		init_one_iic(np[1], r1.start, dn);

		/* Setup cascade for IO exceptions. XXX cleanup tricks to get
		 * node vs CPU etc...
		 * Note that we configure the IIC_IRR here with a hard coded
		 * priority of 1. We might want to improve that later.
		 */
		node = np[0] >> 1;	/* two HW threads per node */
		node_iic = cbe_get_cpu_iic_regs(np[0]);
		/* Build the cascade's hw number: class 1, unit IIC */
		cascade = node << IIC_IRQ_NODE_SHIFT;
		cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
		cascade |= IIC_UNIT_IIC;
		cascade = irq_create_mapping(iic_host, cascade);
		if (cascade == NO_IRQ)
			continue;
		/*
		 * irq_data is a generic pointer that gets passed back
		 * to us later, so the forced cast is fine.
		 */
		irq_set_handler_data(cascade, (void __force *)node_iic);
		irq_set_chained_handler(cascade, iic_ioexc_cascade);
		out_be64(&node_iic->iic_ir,
			 (1 << 12)		/* priority */ |
			 (node << 4)		/* dest node */ |
			 IIC_UNIT_THREAD_0	/* route them to thread 0 */);
		/* Flush pending (make sure it triggers if there is
		 * anything pending
		 */
		out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
	}

	if (found)
		return 0;
	else
		return -ENODEV;
}
/*
 * Top-level IIC initialization: allocate the irq host, probe and set up
 * all controllers, install the platform get_irq hook, and enable
 * interrupt delivery on the boot CPU. Panics on failure.
 */
void __init iic_init_IRQ(void)
{
	/* Setup an irq host data structure */
	iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
				  &iic_host_ops, IIC_IRQ_INVALID);
	BUG_ON(iic_host == NULL);
	irq_set_default_host(iic_host);

	/* Discover and initialize iics */
	if (setup_iic() < 0)
		panic("IIC: Failed to initialize !\n");

	/* Set master interrupt handling function */
	ppc_md.get_irq = iic_get_irq;

	/* Enable on current CPU */
	iic_setup_cpu();
}
  360. void iic_set_interrupt_routing(int cpu, int thread, int priority)
  361. {
  362. struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
  363. u64 iic_ir = 0;
  364. int node = cpu >> 1;
  365. /* Set which node and thread will handle the next interrupt */
  366. iic_ir |= CBE_IIC_IR_PRIO(priority) |
  367. CBE_IIC_IR_DEST_NODE(node);
  368. if (thread == 0)
  369. iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
  370. else
  371. iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
  372. out_be64(&iic_regs->iic_ir, iic_ir);
  373. }