ics.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762
  1. /*
  2. * Copyright 2008-2011 IBM Corporation.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. */
  9. #include <linux/cpu.h>
  10. #include <linux/init.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/irq.h>
  13. #include <linux/kernel.h>
  14. #include <linux/msi.h>
  15. #include <linux/of.h>
  16. #include <linux/slab.h>
  17. #include <linux/smp.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/types.h>
  20. #include <linux/of_address.h>
  21. #include <linux/of_irq.h>
  22. #include <asm/io.h>
  23. #include <asm/irq.h>
  24. #include <asm/xics.h>
  25. #include "wsp.h"
  26. #include "ics.h"
/* WSP ICS */

/*
 * Per-instance state for one WSP interrupt source controller.
 * One of these is allocated per "ibm,wsp-xics" device-tree node.
 */
struct wsp_ics {
	struct ics ics;			/* generic XICS source controller, must embed */
	struct device_node *dn;		/* our device-tree node (refcounted) */
	void __iomem *regs;		/* mapped MMIO register block */
	spinlock_t lock;		/* protects bitmap and IODA table accesses */
	unsigned long *bitmap;		/* allocation bitmap, indexed from hwirq_start */
	u32 chip_id;			/* NodeID this controller sits on */
	u32 lsi_base;			/* first LSI hwirq (absolute number) */
	u32 lsi_count;			/* number of LSIs */
	u64 hwirq_start;		/* first hwirq served by this ICS */
	u64 count;			/* number of hwirqs served */
#ifdef CONFIG_SMP
	int *hwirq_cpu_map;		/* cached server (hard CPU id) per hwirq */
#endif
};
#define to_wsp_ics(ics)	container_of(ics, struct wsp_ics, ics)

/* MMIO register offsets from the mapped register base */
#define INT_SRC_LAYER_BUID_REG(base)	((base) + 0x00)
#define IODA_TBL_ADDR_REG(base)	((base) + 0x18)
#define IODA_TBL_DATA_REG(base)	((base) + 0x20)
#define XIVE_UPDATE_REG(base)	((base) + 0x28)
#define ICS_INT_CAPS_REG(base)	((base) + 0x30)

/* IODA indirect table access: enable + auto-increment, table select bits */
#define TBL_AUTO_INCREMENT	((1UL << 63) | (1UL << 15))
#define TBL_SELECT_XIST	(1UL << 48)
#define TBL_SELECT_XIVT	(1UL << 49)

#define IODA_IRQ(irq)	((irq) & (0x7FFULL))	/* HRM 5.1.3.4 */

/* XIST (state table) entry status bits */
#define XIST_REQUIRED	0x8
#define XIST_REJECTED	0x4
#define XIST_PRESENTED	0x2
#define XIST_PENDING	0x1

/* Field positions within a 64-bit XIVE entry */
#define XIVE_SERVER_SHIFT	42
#define XIVE_SERVER_MASK	0xFFFFULL
#define XIVE_PRIORITY_MASK	0xFFULL
#define XIVE_PRIORITY_SHIFT	32
#define XIVE_WRITE_ENABLE	(1ULL << 63)

/*
 * The docs refer to a 6 bit field called ChipID, which consists of a
 * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero
 * so we ignore it, and every where we use "chip id" in this code we
 * mean the NodeID.
 */
#define WSP_ICS_CHIP_SHIFT	17

/* All controllers found at init, in device-tree discovery order */
static struct wsp_ics *ics_list;
static int num_ics;
/* ICS Source controller accessors */

/*
 * Read the XIVE entry for @irq via the indirect IODA table-access
 * registers.  The lock serialises the address-write / data-read pair
 * against concurrent table accesses on the same controller.
 */
static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
{
	unsigned long flags;
	u64 xive;

	spin_lock_irqsave(&ics->lock, flags);
	out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
	xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
	spin_unlock_irqrestore(&ics->lock, flags);

	return xive;
}
/*
 * Write @xive for @irq using the single-shot XIVE update register.
 * The irq number and the write-enable bit are folded into the one
 * 64-bit store, so no lock is taken here.
 * (XIVE_ADDR_MASK is presumably defined in ics.h — not visible in
 * this file.)
 */
static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
{
	xive &= ~XIVE_ADDR_MASK;
	xive |= (irq & XIVE_ADDR_MASK);
	xive |= XIVE_WRITE_ENABLE;

	out_be64(XIVE_UPDATE_REG(ics->regs), xive);
}
  89. static u64 xive_set_server(u64 xive, unsigned int server)
  90. {
  91. u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);
  92. xive &= mask;
  93. xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;
  94. return xive;
  95. }
  96. static u64 xive_set_priority(u64 xive, unsigned int priority)
  97. {
  98. u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT);
  99. xive &= mask;
  100. xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT;
  101. return xive;
  102. }
#ifdef CONFIG_SMP
/* Find logical CPUs within mask on a given chip and store result in ret */
void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret)
{
	int cpu, chip;
	struct device_node *cpu_dn, *dn;
	const u32 *prop;

	cpumask_clear(ret);
	for_each_cpu(cpu, mask) {
		cpu_dn = of_get_cpu_node(cpu, NULL);
		if (!cpu_dn)
			continue;

		/* "at-node" is a phandle to the chip node this CPU sits on */
		prop = of_get_property(cpu_dn, "at-node", NULL);
		if (!prop) {
			of_node_put(cpu_dn);
			continue;
		}

		/*
		 * NOTE(review): dn may be NULL for a stale phandle; this
		 * assumes wsp_get_chip_id() and of_node_put() tolerate
		 * NULL — confirm against wsp.c.
		 */
		dn = of_find_node_by_phandle(*prop);
		of_node_put(cpu_dn);
		chip = wsp_get_chip_id(dn);
		if (chip == chip_id)
			cpumask_set_cpu(cpu, ret);
		of_node_put(dn);
	}
}
  128. /* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */
  129. static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
  130. const cpumask_t *affinity)
  131. {
  132. cpumask_var_t avail, newmask;
  133. int ret = -ENOMEM, cpu, cpu_rover = 0, target;
  134. int index = hwirq - ics->hwirq_start;
  135. unsigned int nodeid;
  136. BUG_ON(index < 0 || index >= ics->count);
  137. if (!ics->hwirq_cpu_map)
  138. return -ENOMEM;
  139. if (!distribute_irqs) {
  140. ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server;
  141. return 0;
  142. }
  143. /* Allocate needed CPU masks */
  144. if (!alloc_cpumask_var(&avail, GFP_KERNEL))
  145. goto ret;
  146. if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
  147. goto freeavail;
  148. /* Find PBus attached to the source of this IRQ */
  149. nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */
  150. /* Find CPUs that could handle this IRQ */
  151. if (affinity)
  152. cpumask_and(avail, cpu_online_mask, affinity);
  153. else
  154. cpumask_copy(avail, cpu_online_mask);
  155. /* Narrow selection down to logical CPUs on the same chip */
  156. cpus_on_chip(nodeid, avail, newmask);
  157. /* Ensure we haven't narrowed it down to 0 */
  158. if (unlikely(cpumask_empty(newmask))) {
  159. if (unlikely(cpumask_empty(avail))) {
  160. ret = -1;
  161. goto out;
  162. }
  163. cpumask_copy(newmask, avail);
  164. }
  165. /* Choose a CPU out of those we narrowed it down to in round robin */
  166. target = hwirq % cpumask_weight(newmask);
  167. for_each_cpu(cpu, newmask) {
  168. if (cpu_rover++ >= target) {
  169. ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
  170. ret = 0;
  171. goto out;
  172. }
  173. }
  174. /* Shouldn't happen */
  175. WARN_ON(1);
  176. out:
  177. free_cpumask_var(newmask);
  178. freeavail:
  179. free_cpumask_var(avail);
  180. ret:
  181. if (ret < 0) {
  182. ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
  183. pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n",
  184. hwirq, ics->hwirq_cpu_map[index]);
  185. }
  186. return ret;
  187. }
  188. static void alloc_irq_map(struct wsp_ics *ics)
  189. {
  190. int i;
  191. ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL);
  192. if (!ics->hwirq_cpu_map) {
  193. pr_warning("Allocate hwirq_cpu_map failed, "
  194. "IRQ balancing disabled\n");
  195. return;
  196. }
  197. for (i=0; i < ics->count; i++)
  198. ics->hwirq_cpu_map[i] = xics_default_server;
  199. }
  200. static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
  201. {
  202. int index = hwirq - ics->hwirq_start;
  203. BUG_ON(index < 0 || index >= ics->count);
  204. if (!ics->hwirq_cpu_map)
  205. return xics_default_server;
  206. return ics->hwirq_cpu_map[index];
  207. }
#else /* !CONFIG_SMP */

/* UP: there is nothing to cache, all interrupts use the default server */
static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
			   const cpumask_t *affinity)
{
	return 0;
}

static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
{
	return xics_default_server;
}

static void alloc_irq_map(struct wsp_ics *ics) { }
#endif
/*
 * Unmask an interrupt by routing it to its cached server at
 * DEFAULT_PRIORITY.  IPIs and the spurious vector are not real ICS
 * sources and are left alone.
 */
static void wsp_chip_unmask_irq(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct wsp_ics *ics;
	int server;
	u64 xive;

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return;

	ics = d->chip_data;
	if (WARN_ON(!ics))
		return;

	server = get_irq_server(ics, hw_irq);

	xive = wsp_ics_get_xive(ics, hw_irq);
	xive = xive_set_server(xive, server);
	xive = xive_set_priority(xive, DEFAULT_PRIORITY);
	wsp_ics_set_xive(ics, hw_irq, xive);
}
  237. static unsigned int wsp_chip_startup(struct irq_data *d)
  238. {
  239. /* unmask it */
  240. wsp_chip_unmask_irq(d);
  241. return 0;
  242. }
/*
 * Mask @hw_irq the XICS way: route it to the default server at
 * LOWEST_PRIORITY.  Takes a raw hwirq rather than an irq_data so it
 * can also be used on interrupts that were never mapped (see
 * wsp_ics_mask_unknown() and the boot-time mask-all loop).
 */
static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
{
	u64 xive;

	if (hw_irq == XICS_IPI)
		return;

	if (WARN_ON(!ics))
		return;

	xive = wsp_ics_get_xive(ics, hw_irq);
	xive = xive_set_server(xive, xics_default_server);
	xive = xive_set_priority(xive, LOWEST_PRIORITY);
	wsp_ics_set_xive(ics, hw_irq, xive);
}
  255. static void wsp_chip_mask_irq(struct irq_data *d)
  256. {
  257. unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
  258. struct wsp_ics *ics = d->chip_data;
  259. if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
  260. return;
  261. wsp_mask_real_irq(hw_irq, ics);
  262. }
/*
 * irq_chip affinity hook.  Picks and caches a new server for the
 * hwirq via cache_hwirq_map(), then rewrites the XIVE with it.  The
 * XIVE is read *before* the remap so the priority field is carried
 * over unchanged.
 *
 * Returns IRQ_SET_MASK_OK on success, -1 on any failure (note: the
 * historical convention in this driver; callers in the irq core treat
 * any negative value as failure).
 */
static int wsp_chip_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask, bool force)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct wsp_ics *ics;
	int ret;
	u64 xive;

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return -1;

	ics = d->chip_data;
	if (WARN_ON(!ics))
		return -1;

	xive = wsp_ics_get_xive(ics, hw_irq);

	/*
	 * For the moment only implement delivery to all cpus or one cpu.
	 * Get current irq_server for the given irq
	 */
	ret = cache_hwirq_map(ics, hw_irq, cpumask);
	if (ret == -1) {
		char cpulist[128];
		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
		pr_warning("%s: No online cpus in the mask %s for irq %d\n",
			   __func__, cpulist, d->irq);
		return -1;
	} else if (ret == -ENOMEM) {
		pr_warning("%s: Out of memory\n", __func__);
		return -1;
	}

	xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
	wsp_ics_set_xive(ics, hw_irq, xive);

	return IRQ_SET_MASK_OK;
}
/*
 * No .irq_eoi here: it is patched in at boot by wsp_init_irq() once
 * the ICP implementation is known.
 */
static struct irq_chip wsp_irq_chip = {
	.name = "WSP ICS",
	.irq_startup = wsp_chip_startup,
	.irq_mask = wsp_chip_mask_irq,
	.irq_unmask = wsp_chip_unmask_irq,
	.irq_set_affinity = wsp_chip_set_affinity
};
static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
{
	/* All ICSs in the system implement a global irq number space,
	 * so match against them all. */
	return of_device_is_compatible(dn, "ibm,ppc-xics");
}
  308. static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
  309. {
  310. if (hwirq >= wsp_ics->hwirq_start &&
  311. hwirq < wsp_ics->hwirq_start + wsp_ics->count)
  312. return 1;
  313. return 0;
  314. }
/*
 * ics->map hook: claim @virq for this controller.  Installs the chip
 * and fasteoi handler, and marks the hwirq as in-use in the bitmap so
 * wsp_ics_alloc_irq() will not hand it out again.
 */
static int wsp_ics_map(struct ics *ics, unsigned int virq)
{
	struct wsp_ics *wsp_ics = to_wsp_ics(ics);
	unsigned int hw_irq = virq_to_hw(virq);
	unsigned long flags;

	if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
		return -ENOENT;

	irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq);

	irq_set_chip_data(virq, wsp_ics);

	spin_lock_irqsave(&wsp_ics->lock, flags);
	/* bitmap is indexed from hwirq_start */
	bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0);
	spin_unlock_irqrestore(&wsp_ics->lock, flags);

	return 0;
}
  329. static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
  330. {
  331. struct wsp_ics *wsp_ics = to_wsp_ics(ics);
  332. if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
  333. return;
  334. pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
  335. wsp_mask_real_irq(hw_irq, wsp_ics);
  336. }
  337. static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
  338. {
  339. struct wsp_ics *wsp_ics = to_wsp_ics(ics);
  340. if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
  341. return -ENOENT;
  342. return get_irq_server(wsp_ics, hw_irq);
  343. }
  344. /* HW Number allocation API */
  345. static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
  346. {
  347. struct device_node *iparent;
  348. int i;
  349. iparent = of_irq_find_parent(dn);
  350. if (!iparent) {
  351. pr_err("wsp_ics: Failed to find interrupt parent!\n");
  352. return NULL;
  353. }
  354. for(i = 0; i < num_ics; i++) {
  355. if(ics_list[i].dn == iparent)
  356. break;
  357. }
  358. if (i >= num_ics) {
  359. pr_err("wsp_ics: Unable to find parent bitmap!\n");
  360. return NULL;
  361. }
  362. return &ics_list[i];
  363. }
/*
 * Allocate @num contiguous, naturally-aligned hwirqs from the ICS
 * serving @dn.  Returns the first *absolute* hwirq number (bitmap
 * offset + hwirq_start) or a negative errno.
 */
int wsp_ics_alloc_irq(struct device_node *dn, int num)
{
	struct wsp_ics *ics;
	int order, offset;

	ics = wsp_ics_find_dn_ics(dn);
	if (!ics)
		return -ENODEV;

	/* Fast, but overly strict if num isn't a power of two */
	order = get_count_order(num);

	spin_lock_irq(&ics->lock);
	offset = bitmap_find_free_region(ics->bitmap, ics->count, order);
	spin_unlock_irq(&ics->lock);

	if (offset < 0)
		return offset;

	return offset + ics->hwirq_start;
}
  380. void wsp_ics_free_irq(struct device_node *dn, unsigned int irq)
  381. {
  382. struct wsp_ics *ics;
  383. ics = wsp_ics_find_dn_ics(dn);
  384. if (WARN_ON(!ics))
  385. return;
  386. spin_lock_irq(&ics->lock);
  387. bitmap_release_region(ics->bitmap, irq, 0);
  388. spin_unlock_irq(&ics->lock);
  389. }
/* Initialisation */

/*
 * Build the allocation bitmap for @ics from the device tree.  The
 * bitmap covers hwirq_start..hwirq_start+count-1; a set bit means
 * "not available".  Ranges listed in "available-ranges" are released,
 * everything else stays reserved, and the LSI block is re-reserved at
 * the end.  Returns 0 or -ENOMEM.
 */
static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
				       struct device_node *dn)
{
	int len, i, j, size;
	u32 start, count;
	const u32 *p;

	size = BITS_TO_LONGS(ics->count) * sizeof(long);
	ics->bitmap = kzalloc(size, GFP_KERNEL);
	if (!ics->bitmap) {
		pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
		return -ENOMEM;
	}

	spin_lock_init(&ics->lock);

	p = of_get_property(dn, "available-ranges", &len);
	if (!p || !len) {
		/* FIXME this should be a WARN() once mambo is updated */
		/* Note: returning here leaves the zeroed (all-free) bitmap,
		 * so every hwirq — including LSIs — remains allocatable. */
		pr_err("wsp_ics: No available-ranges defined for %s\n",
		       dn->full_name);
		return 0;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		/* FIXME this should be a WARN() once mambo is updated */
		pr_err("wsp_ics: Invalid available-ranges for %s\n",
		       dn->full_name);
		return 0;
	}

	/* Start fully reserved, then release the advertised ranges */
	bitmap_fill(ics->bitmap, ics->count);

	for (i = 0; i < len / sizeof(u32); i += 2) {
		start = of_read_number(p + i, 1);
		count = of_read_number(p + i + 1, 1);

		pr_devel("%s: start: %d count: %d\n", __func__, start, count);

		if ((start + count) > (ics->hwirq_start + ics->count) ||
		    start < ics->hwirq_start) {
			pr_err("wsp_ics: Invalid range! -> %d to %d\n",
			       start, start + count);
			break;
		}

		for (j = 0; j < count; j++)
			bitmap_release_region(ics->bitmap,
					      (start + j) - ics->hwirq_start, 0);
	}

	/* Ensure LSIs are not available for allocation */
	/* NOTE(review): lsi_base is an absolute hwirq number elsewhere in
	 * this file, but is used here as a bitmap offset without
	 * subtracting hwirq_start — verify against the hardware layout. */
	bitmap_allocate_region(ics->bitmap, ics->lsi_base,
			       get_count_order(ics->lsi_count));

	return 0;
}
  437. static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn)
  438. {
  439. u32 lsi_buid, msi_buid, msi_base, msi_count;
  440. void __iomem *regs;
  441. const u32 *p;
  442. int rc, len, i;
  443. u64 caps, buid;
  444. p = of_get_property(dn, "interrupt-ranges", &len);
  445. if (!p || len < (2 * sizeof(u32))) {
  446. pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
  447. dn->full_name);
  448. return -ENOENT;
  449. }
  450. if (len > (2 * sizeof(u32))) {
  451. pr_err("wsp_ics: Multiple ics ranges not supported.\n");
  452. return -EINVAL;
  453. }
  454. regs = of_iomap(dn, 0);
  455. if (!regs) {
  456. pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name);
  457. return -ENXIO;
  458. }
  459. ics->hwirq_start = of_read_number(p, 1);
  460. ics->count = of_read_number(p + 1, 1);
  461. ics->regs = regs;
  462. ics->chip_id = wsp_get_chip_id(dn);
  463. if (WARN_ON(ics->chip_id < 0))
  464. ics->chip_id = 0;
  465. /* Get some informations about the critter */
  466. caps = in_be64(ICS_INT_CAPS_REG(ics->regs));
  467. buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs));
  468. ics->lsi_count = caps >> 56;
  469. msi_count = (caps >> 44) & 0x7ff;
  470. /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the
  471. * rest is mixed in the interrupt number. We store the whole
  472. * thing though
  473. */
  474. lsi_buid = (buid >> 48) & 0x1ff;
  475. ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5;
  476. msi_buid = (buid >> 37) & 0x7;
  477. msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11;
  478. pr_info("wsp_ics: Found %s\n", dn->full_name);
  479. pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
  480. ics->hwirq_start, ics->hwirq_start + ics->count - 1);
  481. pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
  482. ics->lsi_count, ics->lsi_base,
  483. ics->lsi_base + ics->lsi_count - 1);
  484. pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
  485. msi_count, msi_base,
  486. msi_base + msi_count - 1);
  487. /* Let's check the HW config is sane */
  488. if (ics->lsi_base < ics->hwirq_start ||
  489. (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count))
  490. pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n");
  491. if (msi_base < ics->hwirq_start ||
  492. (msi_base + msi_count) > (ics->hwirq_start + ics->count))
  493. pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n");
  494. /* We don't check for overlap between LSI and MSI, which will happen
  495. * if we use the same BUID, I'm not sure yet how legit that is.
  496. */
  497. rc = wsp_ics_bitmap_setup(ics, dn);
  498. if (rc) {
  499. iounmap(regs);
  500. return rc;
  501. }
  502. ics->dn = of_node_get(dn);
  503. alloc_irq_map(ics);
  504. for(i = 0; i < ics->count; i++)
  505. wsp_mask_real_irq(ics->hwirq_start + i, ics);
  506. ics->ics.map = wsp_ics_map;
  507. ics->ics.mask_unknown = wsp_ics_mask_unknown;
  508. ics->ics.get_server = wsp_ics_get_server;
  509. ics->ics.host_match = wsp_ics_host_match;
  510. xics_register_ics(&ics->ics);
  511. return 0;
  512. }
  513. static void __init wsp_ics_set_default_server(void)
  514. {
  515. struct device_node *np;
  516. u32 hwid;
  517. /* Find the server number for the boot cpu. */
  518. np = of_get_cpu_node(boot_cpuid, NULL);
  519. BUG_ON(!np);
  520. hwid = get_hard_smp_processor_id(boot_cpuid);
  521. pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name);
  522. xics_default_server = hwid;
  523. of_node_put(np);
  524. }
  525. static int __init wsp_ics_init(void)
  526. {
  527. struct device_node *dn;
  528. struct wsp_ics *ics;
  529. int rc, found;
  530. wsp_ics_set_default_server();
  531. found = 0;
  532. for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
  533. found++;
  534. if (found == 0) {
  535. pr_err("wsp_ics: No ICS's found!\n");
  536. return -ENODEV;
  537. }
  538. ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL);
  539. if (!ics_list) {
  540. pr_err("wsp_ics: No memory for structs.\n");
  541. return -ENOMEM;
  542. }
  543. num_ics = 0;
  544. ics = ics_list;
  545. for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
  546. rc = wsp_ics_setup(ics, dn);
  547. if (rc == 0) {
  548. ics++;
  549. num_ics++;
  550. }
  551. }
  552. if (found != num_ics) {
  553. pr_err("wsp_ics: Failed setting up %d ICS's\n",
  554. found - num_ics);
  555. return -1;
  556. }
  557. return 0;
  558. }
/* Platform entry point: bring up the ICSs, then the XICS core. */
void __init wsp_init_irq(void)
{
	wsp_ics_init();
	xics_init();

	/* We need to patch our irq chip's EOI to point to the right ICP */
	wsp_irq_chip.irq_eoi = icp_ops->eoi;
}
#ifdef CONFIG_PCI_MSI

/* Unmask at the ICS first, then at the MSI capability. */
static void wsp_ics_msi_unmask_irq(struct irq_data *d)
{
	wsp_chip_unmask_irq(d);
	unmask_msi_irq(d);
}
/* Starting an MSI is just unmasking it at both levels. */
static unsigned int wsp_ics_msi_startup(struct irq_data *d)
{
	wsp_ics_msi_unmask_irq(d);
	return 0;
}
/* Mask at the MSI capability first, then at the ICS (reverse of unmask). */
static void wsp_ics_msi_mask_irq(struct irq_data *d)
{
	mask_msi_irq(d);
	wsp_chip_mask_irq(d);
}
/*
 * We do it this way because we reassign the default EOI handling in
 * wsp_init_irq() above: wsp_irq_chip.irq_eoi is only filled in at
 * boot, so forward through it at call time rather than copying it
 * into this chip statically.
 */
static void wsp_ics_eoi(struct irq_data *data)
{
	wsp_irq_chip.irq_eoi(data);
}
/* irq_chip for MSI sources: ICS ops wrapped with MSI mask/unmask. */
static struct irq_chip wsp_ics_msi = {
	.name = "WSP ICS MSI",
	.irq_startup = wsp_ics_msi_startup,
	.irq_mask = wsp_ics_msi_mask_irq,
	.irq_unmask = wsp_ics_msi_unmask_irq,
	.irq_eoi = wsp_ics_eoi,
	.irq_set_affinity = wsp_chip_set_affinity
};
/* Switch @irq over to the MSI chip (called by the WSP MSI code). */
void wsp_ics_set_msi_chip(unsigned int irq)
{
	irq_set_chip(irq, &wsp_ics_msi);
}
/* Restore the standard (non-MSI) chip on @irq. */
void wsp_ics_set_std_chip(unsigned int irq)
{
	irq_set_chip(irq, &wsp_irq_chip);
}
#endif /* CONFIG_PCI_MSI */