/* (web-extraction artifacts removed: file-size banner and line-number residue) */
/*
 * arch/powerpc/platforms/pseries/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/i8259.h>
#include "xics.h"
  32. static unsigned int xics_startup(unsigned int irq);
  33. static void xics_enable_irq(unsigned int irq);
  34. static void xics_disable_irq(unsigned int irq);
  35. static void xics_mask_and_ack_irq(unsigned int irq);
  36. static void xics_end_irq(unsigned int irq);
  37. static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
  38. static struct hw_interrupt_type xics_pic = {
  39. .typename = " XICS ",
  40. .startup = xics_startup,
  41. .enable = xics_enable_irq,
  42. .disable = xics_disable_irq,
  43. .ack = xics_mask_and_ack_irq,
  44. .end = xics_end_irq,
  45. .set_affinity = xics_set_affinity
  46. };
  47. /* This is used to map real irq numbers to virtual */
  48. static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
  49. #define XICS_IPI 2
  50. #define XICS_IRQ_SPURIOUS 0
  51. /* Want a priority other than 0. Various HW issues require this. */
  52. #define DEFAULT_PRIORITY 5
  53. /*
  54. * Mark IPIs as higher priority so we can take them inside interrupts that
  55. * arent marked SA_INTERRUPT
  56. */
  57. #define IPI_PRIORITY 4
  58. struct xics_ipl {
  59. union {
  60. u32 word;
  61. u8 bytes[4];
  62. } xirr_poll;
  63. union {
  64. u32 word;
  65. u8 bytes[4];
  66. } xirr;
  67. u32 dummy;
  68. union {
  69. u32 word;
  70. u8 bytes[4];
  71. } qirr;
  72. };
  73. static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
  74. static int xics_irq_8259_cascade = 0;
  75. static int xics_irq_8259_cascade_real = 0;
  76. static unsigned int default_server = 0xFF;
  77. static unsigned int default_distrib_server = 0;
  78. static unsigned int interrupt_server_size = 8;
  79. /*
  80. * XICS only has a single IPI, so encode the messages per CPU
  81. */
  82. struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
  83. /* RTAS service tokens */
  84. static int ibm_get_xive;
  85. static int ibm_set_xive;
  86. static int ibm_int_on;
  87. static int ibm_int_off;
  88. typedef struct {
  89. int (*xirr_info_get)(int cpu);
  90. void (*xirr_info_set)(int cpu, int val);
  91. void (*cppr_info)(int cpu, u8 val);
  92. void (*qirr_info)(int cpu, u8 val);
  93. } xics_ops;
  94. /* SMP */
  95. static int pSeries_xirr_info_get(int n_cpu)
  96. {
  97. return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
  98. }
  99. static void pSeries_xirr_info_set(int n_cpu, int value)
  100. {
  101. out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
  102. }
  103. static void pSeries_cppr_info(int n_cpu, u8 value)
  104. {
  105. out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
  106. }
  107. static void pSeries_qirr_info(int n_cpu, u8 value)
  108. {
  109. out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
  110. }
  111. static xics_ops pSeries_ops = {
  112. pSeries_xirr_info_get,
  113. pSeries_xirr_info_set,
  114. pSeries_cppr_info,
  115. pSeries_qirr_info
  116. };
  117. static xics_ops *ops = &pSeries_ops;
  118. /* LPAR */
  119. static inline long plpar_eoi(unsigned long xirr)
  120. {
  121. return plpar_hcall_norets(H_EOI, xirr);
  122. }
  123. static inline long plpar_cppr(unsigned long cppr)
  124. {
  125. return plpar_hcall_norets(H_CPPR, cppr);
  126. }
  127. static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
  128. {
  129. return plpar_hcall_norets(H_IPI, servernum, mfrr);
  130. }
  131. static inline long plpar_xirr(unsigned long *xirr_ret)
  132. {
  133. unsigned long dummy;
  134. return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
  135. }
  136. static int pSeriesLP_xirr_info_get(int n_cpu)
  137. {
  138. unsigned long lpar_rc;
  139. unsigned long return_value;
  140. lpar_rc = plpar_xirr(&return_value);
  141. if (lpar_rc != H_Success)
  142. panic(" bad return code xirr - rc = %lx \n", lpar_rc);
  143. return (int)return_value;
  144. }
  145. static void pSeriesLP_xirr_info_set(int n_cpu, int value)
  146. {
  147. unsigned long lpar_rc;
  148. unsigned long val64 = value & 0xffffffff;
  149. lpar_rc = plpar_eoi(val64);
  150. if (lpar_rc != H_Success)
  151. panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
  152. val64);
  153. }
  154. void pSeriesLP_cppr_info(int n_cpu, u8 value)
  155. {
  156. unsigned long lpar_rc;
  157. lpar_rc = plpar_cppr(value);
  158. if (lpar_rc != H_Success)
  159. panic("bad return code cppr - rc = %lx\n", lpar_rc);
  160. }
  161. static void pSeriesLP_qirr_info(int n_cpu , u8 value)
  162. {
  163. unsigned long lpar_rc;
  164. lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
  165. if (lpar_rc != H_Success)
  166. panic("bad return code qirr - rc = %lx\n", lpar_rc);
  167. }
  168. xics_ops pSeriesLP_ops = {
  169. pSeriesLP_xirr_info_get,
  170. pSeriesLP_xirr_info_set,
  171. pSeriesLP_cppr_info,
  172. pSeriesLP_qirr_info
  173. };
  174. static unsigned int xics_startup(unsigned int virq)
  175. {
  176. unsigned int irq;
  177. irq = irq_offset_down(virq);
  178. if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
  179. &virt_irq_to_real_map[irq]) == -ENOMEM)
  180. printk(KERN_CRIT "Out of memory creating real -> virtual"
  181. " IRQ mapping for irq %u (real 0x%x)\n",
  182. virq, virt_irq_to_real(irq));
  183. xics_enable_irq(virq);
  184. return 0; /* return value is ignored */
  185. }
  186. static unsigned int real_irq_to_virt(unsigned int real_irq)
  187. {
  188. unsigned int *ptr;
  189. ptr = radix_tree_lookup(&irq_map, real_irq);
  190. if (ptr == NULL)
  191. return NO_IRQ;
  192. return ptr - virt_irq_to_real_map;
  193. }
  194. #ifdef CONFIG_SMP
  195. static int get_irq_server(unsigned int irq)
  196. {
  197. unsigned int server;
  198. /* For the moment only implement delivery to all cpus or one cpu */
  199. cpumask_t cpumask = irq_affinity[irq];
  200. cpumask_t tmp = CPU_MASK_NONE;
  201. if (!distribute_irqs)
  202. return default_server;
  203. if (cpus_equal(cpumask, CPU_MASK_ALL)) {
  204. server = default_distrib_server;
  205. } else {
  206. cpus_and(tmp, cpu_online_map, cpumask);
  207. if (cpus_empty(tmp))
  208. server = default_distrib_server;
  209. else
  210. server = get_hard_smp_processor_id(first_cpu(tmp));
  211. }
  212. return server;
  213. }
  214. #else
  215. static int get_irq_server(unsigned int irq)
  216. {
  217. return default_server;
  218. }
  219. #endif
  220. static void xics_enable_irq(unsigned int virq)
  221. {
  222. unsigned int irq;
  223. int call_status;
  224. unsigned int server;
  225. irq = virt_irq_to_real(irq_offset_down(virq));
  226. if (irq == XICS_IPI)
  227. return;
  228. server = get_irq_server(virq);
  229. call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
  230. DEFAULT_PRIORITY);
  231. if (call_status != 0) {
  232. printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_set_xive "
  233. "returned %d\n", irq, call_status);
  234. printk("set_xive %x, server %x\n", ibm_set_xive, server);
  235. return;
  236. }
  237. /* Now unmask the interrupt (often a no-op) */
  238. call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
  239. if (call_status != 0) {
  240. printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_int_on "
  241. "returned %d\n", irq, call_status);
  242. return;
  243. }
  244. }
  245. static void xics_disable_real_irq(unsigned int irq)
  246. {
  247. int call_status;
  248. unsigned int server;
  249. if (irq == XICS_IPI)
  250. return;
  251. call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
  252. if (call_status != 0) {
  253. printk(KERN_ERR "xics_disable_real_irq: irq=%u: "
  254. "ibm_int_off returned %d\n", irq, call_status);
  255. return;
  256. }
  257. server = get_irq_server(irq);
  258. /* Have to set XIVE to 0xff to be able to remove a slot */
  259. call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
  260. if (call_status != 0) {
  261. printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)"
  262. " returned %d\n", irq, call_status);
  263. return;
  264. }
  265. }
  266. static void xics_disable_irq(unsigned int virq)
  267. {
  268. unsigned int irq;
  269. irq = virt_irq_to_real(irq_offset_down(virq));
  270. xics_disable_real_irq(irq);
  271. }
  272. static void xics_end_irq(unsigned int irq)
  273. {
  274. int cpu = smp_processor_id();
  275. iosync();
  276. ops->xirr_info_set(cpu, ((0xff << 24) |
  277. (virt_irq_to_real(irq_offset_down(irq)))));
  278. }
  279. static void xics_mask_and_ack_irq(unsigned int irq)
  280. {
  281. int cpu = smp_processor_id();
  282. if (irq < irq_offset_value()) {
  283. i8259_pic.ack(irq);
  284. iosync();
  285. ops->xirr_info_set(cpu, ((0xff<<24) |
  286. xics_irq_8259_cascade_real));
  287. iosync();
  288. }
  289. }
  290. int xics_get_irq(struct pt_regs *regs)
  291. {
  292. unsigned int cpu = smp_processor_id();
  293. unsigned int vec;
  294. int irq;
  295. vec = ops->xirr_info_get(cpu);
  296. /* (vec >> 24) == old priority */
  297. vec &= 0x00ffffff;
  298. /* for sanity, this had better be < NR_IRQS - 16 */
  299. if (vec == xics_irq_8259_cascade_real) {
  300. irq = i8259_irq(regs);
  301. xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
  302. } else if (vec == XICS_IRQ_SPURIOUS) {
  303. irq = -1;
  304. } else {
  305. irq = real_irq_to_virt(vec);
  306. if (irq == NO_IRQ)
  307. irq = real_irq_to_virt_slowpath(vec);
  308. if (irq == NO_IRQ) {
  309. printk(KERN_ERR "Interrupt %u (real) is invalid,"
  310. " disabling it.\n", vec);
  311. xics_disable_real_irq(vec);
  312. } else
  313. irq = irq_offset_up(irq);
  314. }
  315. return irq;
  316. }
  317. #ifdef CONFIG_SMP
  318. static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
  319. {
  320. int cpu = smp_processor_id();
  321. ops->qirr_info(cpu, 0xff);
  322. WARN_ON(cpu_is_offline(cpu));
  323. while (xics_ipi_message[cpu].value) {
  324. if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
  325. &xics_ipi_message[cpu].value)) {
  326. mb();
  327. smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
  328. }
  329. if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
  330. &xics_ipi_message[cpu].value)) {
  331. mb();
  332. smp_message_recv(PPC_MSG_RESCHEDULE, regs);
  333. }
  334. #if 0
  335. if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
  336. &xics_ipi_message[cpu].value)) {
  337. mb();
  338. smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
  339. }
  340. #endif
  341. #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
  342. if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
  343. &xics_ipi_message[cpu].value)) {
  344. mb();
  345. smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
  346. }
  347. #endif
  348. }
  349. return IRQ_HANDLED;
  350. }
  351. void xics_cause_IPI(int cpu)
  352. {
  353. ops->qirr_info(cpu, IPI_PRIORITY);
  354. }
  355. #endif /* CONFIG_SMP */
  356. void xics_setup_cpu(void)
  357. {
  358. int cpu = smp_processor_id();
  359. ops->cppr_info(cpu, 0xff);
  360. iosync();
  361. /*
  362. * Put the calling processor into the GIQ. This is really only
  363. * necessary from a secondary thread as the OF start-cpu interface
  364. * performs this function for us on primary threads.
  365. *
  366. * XXX: undo of teardown on kexec needs this too, as may hotplug
  367. */
  368. rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
  369. (1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
  370. }
  371. void xics_init_IRQ(void)
  372. {
  373. int i;
  374. unsigned long intr_size = 0;
  375. struct device_node *np;
  376. uint *ireg, ilen, indx = 0;
  377. unsigned long intr_base = 0;
  378. struct xics_interrupt_node {
  379. unsigned long addr;
  380. unsigned long size;
  381. } intnodes[NR_CPUS];
  382. ppc64_boot_msg(0x20, "XICS Init");
  383. ibm_get_xive = rtas_token("ibm,get-xive");
  384. ibm_set_xive = rtas_token("ibm,set-xive");
  385. ibm_int_on = rtas_token("ibm,int-on");
  386. ibm_int_off = rtas_token("ibm,int-off");
  387. np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
  388. if (!np)
  389. panic("xics_init_IRQ: can't find interrupt presentation");
  390. nextnode:
  391. ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
  392. if (ireg) {
  393. /*
  394. * set node starting index for this node
  395. */
  396. indx = *ireg;
  397. }
  398. ireg = (uint *)get_property(np, "reg", &ilen);
  399. if (!ireg)
  400. panic("xics_init_IRQ: can't find interrupt reg property");
  401. while (ilen) {
  402. intnodes[indx].addr = (unsigned long)*ireg++ << 32;
  403. ilen -= sizeof(uint);
  404. intnodes[indx].addr |= *ireg++;
  405. ilen -= sizeof(uint);
  406. intnodes[indx].size = (unsigned long)*ireg++ << 32;
  407. ilen -= sizeof(uint);
  408. intnodes[indx].size |= *ireg++;
  409. ilen -= sizeof(uint);
  410. indx++;
  411. if (indx >= NR_CPUS) break;
  412. }
  413. np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
  414. if ((indx < NR_CPUS) && np) goto nextnode;
  415. /* Find the server numbers for the boot cpu. */
  416. for (np = of_find_node_by_type(NULL, "cpu");
  417. np;
  418. np = of_find_node_by_type(np, "cpu")) {
  419. ireg = (uint *)get_property(np, "reg", &ilen);
  420. if (ireg && ireg[0] == boot_cpuid_phys) {
  421. ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s",
  422. &ilen);
  423. i = ilen / sizeof(int);
  424. if (ireg && i > 0) {
  425. default_server = ireg[0];
  426. default_distrib_server = ireg[i-1]; /* take last element */
  427. }
  428. ireg = (uint *)get_property(np,
  429. "ibm,interrupt-server#-size", NULL);
  430. if (ireg)
  431. interrupt_server_size = *ireg;
  432. break;
  433. }
  434. }
  435. of_node_put(np);
  436. intr_base = intnodes[0].addr;
  437. intr_size = intnodes[0].size;
  438. np = of_find_node_by_type(NULL, "interrupt-controller");
  439. if (!np) {
  440. printk(KERN_WARNING "xics: no ISA interrupt controller\n");
  441. xics_irq_8259_cascade_real = -1;
  442. xics_irq_8259_cascade = -1;
  443. } else {
  444. ireg = (uint *) get_property(np, "interrupts", NULL);
  445. if (!ireg)
  446. panic("xics_init_IRQ: can't find ISA interrupts property");
  447. xics_irq_8259_cascade_real = *ireg;
  448. xics_irq_8259_cascade
  449. = virt_irq_create_mapping(xics_irq_8259_cascade_real);
  450. i8259_init(0, 0);
  451. of_node_put(np);
  452. }
  453. if (platform_is_lpar())
  454. ops = &pSeriesLP_ops;
  455. else {
  456. #ifdef CONFIG_SMP
  457. for_each_cpu(i) {
  458. int hard_id;
  459. /* FIXME: Do this dynamically! --RR */
  460. if (!cpu_present(i))
  461. continue;
  462. hard_id = get_hard_smp_processor_id(i);
  463. xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
  464. intnodes[hard_id].size);
  465. }
  466. #else
  467. xics_per_cpu[0] = ioremap(intr_base, intr_size);
  468. #endif /* CONFIG_SMP */
  469. }
  470. for (i = irq_offset_value(); i < NR_IRQS; ++i)
  471. get_irq_desc(i)->handler = &xics_pic;
  472. xics_setup_cpu();
  473. ppc64_boot_msg(0x21, "XICS Done");
  474. }
  475. /*
  476. * We cant do this in init_IRQ because we need the memory subsystem up for
  477. * request_irq()
  478. */
  479. static int __init xics_setup_i8259(void)
  480. {
  481. if (ppc64_interrupt_controller == IC_PPC_XIC &&
  482. xics_irq_8259_cascade != -1) {
  483. if (request_irq(irq_offset_up(xics_irq_8259_cascade),
  484. no_action, 0, "8259 cascade", NULL))
  485. printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
  486. "cascade\n");
  487. }
  488. return 0;
  489. }
  490. arch_initcall(xics_setup_i8259);
  491. #ifdef CONFIG_SMP
  492. void xics_request_IPIs(void)
  493. {
  494. virt_irq_to_real_map[XICS_IPI] = XICS_IPI;
  495. /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
  496. request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
  497. "IPI", NULL);
  498. get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
  499. }
  500. #endif
  501. static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
  502. {
  503. unsigned int irq;
  504. int status;
  505. int xics_status[2];
  506. unsigned long newmask;
  507. cpumask_t tmp = CPU_MASK_NONE;
  508. irq = virt_irq_to_real(irq_offset_down(virq));
  509. if (irq == XICS_IPI || irq == NO_IRQ)
  510. return;
  511. status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
  512. if (status) {
  513. printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
  514. "returns %d\n", irq, status);
  515. return;
  516. }
  517. /* For the moment only implement delivery to all cpus or one cpu */
  518. if (cpus_equal(cpumask, CPU_MASK_ALL)) {
  519. newmask = default_distrib_server;
  520. } else {
  521. cpus_and(tmp, cpu_online_map, cpumask);
  522. if (cpus_empty(tmp))
  523. return;
  524. newmask = get_hard_smp_processor_id(first_cpu(tmp));
  525. }
  526. status = rtas_call(ibm_set_xive, 3, 1, NULL,
  527. irq, newmask, xics_status[1]);
  528. if (status) {
  529. printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
  530. "returns %d\n", irq, status);
  531. return;
  532. }
  533. }
  534. void xics_teardown_cpu(int secondary)
  535. {
  536. int cpu = smp_processor_id();
  537. ops->cppr_info(cpu, 0x00);
  538. iosync();
  539. /*
  540. * Some machines need to have at least one cpu in the GIQ,
  541. * so leave the master cpu in the group.
  542. */
  543. if (secondary) {
  544. /*
  545. * we need to EOI the IPI if we got here from kexec down IPI
  546. *
  547. * probably need to check all the other interrupts too
  548. * should we be flagging idle loop instead?
  549. * or creating some task to be scheduled?
  550. */
  551. ops->xirr_info_set(cpu, XICS_IPI);
  552. rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
  553. (1UL << interrupt_server_size) - 1 -
  554. default_distrib_server, 0);
  555. }
  556. }
  557. #ifdef CONFIG_HOTPLUG_CPU
  558. /* Interrupts are disabled. */
  559. void xics_migrate_irqs_away(void)
  560. {
  561. int status;
  562. unsigned int irq, virq, cpu = smp_processor_id();
  563. /* Reject any interrupt that was queued to us... */
  564. ops->cppr_info(cpu, 0);
  565. iosync();
  566. /* remove ourselves from the global interrupt queue */
  567. status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
  568. (1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
  569. WARN_ON(status < 0);
  570. /* Allow IPIs again... */
  571. ops->cppr_info(cpu, DEFAULT_PRIORITY);
  572. iosync();
  573. for_each_irq(virq) {
  574. irq_desc_t *desc;
  575. int xics_status[2];
  576. unsigned long flags;
  577. /* We cant set affinity on ISA interrupts */
  578. if (virq < irq_offset_value())
  579. continue;
  580. desc = get_irq_desc(virq);
  581. irq = virt_irq_to_real(irq_offset_down(virq));
  582. /* We need to get IPIs still. */
  583. if (irq == XICS_IPI || irq == NO_IRQ)
  584. continue;
  585. /* We only need to migrate enabled IRQS */
  586. if (desc == NULL || desc->handler == NULL
  587. || desc->action == NULL
  588. || desc->handler->set_affinity == NULL)
  589. continue;
  590. spin_lock_irqsave(&desc->lock, flags);
  591. status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
  592. if (status) {
  593. printk(KERN_ERR "migrate_irqs_away: irq=%u "
  594. "ibm,get-xive returns %d\n",
  595. virq, status);
  596. goto unlock;
  597. }
  598. /*
  599. * We only support delivery to all cpus or to one cpu.
  600. * The irq has to be migrated only in the single cpu
  601. * case.
  602. */
  603. if (xics_status[0] != get_hard_smp_processor_id(cpu))
  604. goto unlock;
  605. printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
  606. virq, cpu);
  607. /* Reset affinity to all cpus */
  608. desc->handler->set_affinity(virq, CPU_MASK_ALL);
  609. irq_affinity[virq] = CPU_MASK_ALL;
  610. unlock:
  611. spin_unlock_irqrestore(&desc->lock, flags);
  612. }
  613. }
  614. #endif