/*
 * arch/powerpc/kernel/mpic.c
 *
 * Driver for interrupt controllers following the OpenPIC standard, the
 * common implementation being IBM's MPIC. This driver can also deal
 * with various broken implementations of this HW.
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */
#undef DEBUG
#undef DEBUG_IPI
#undef DEBUG_IRQ
#undef DEBUG_LOW

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/smp.h>

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

static struct mpic *mpics;
static struct mpic *mpic_primary;
static DEFINE_SPINLOCK(mpic_lock);

#ifdef CONFIG_PPC32	/* XXX for now */
#ifdef CONFIG_IRQ_ALL_CPUS
#define distribute_irqs	(1)
#else
#define distribute_irqs	(0)
#endif
#endif

/*
 * Register accessor functions
 */
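
/* Register offsets below are byte offsets; the mapped bases are u32
 * pointers, hence the ">> 2" scaling in the accessors. Which endianness
 * to use is selected per controller via the MPIC_BIG_ENDIAN flag.
 */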
static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
			     unsigned int reg)
{
	if (be)
		return in_be32(base + (reg >> 2));
	else
		return in_le32(base + (reg >> 2));
}

static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
			       unsigned int reg, u32 value)
{
	if (be)
		out_be32(base + (reg >> 2), value);
	else
		out_le32(base + (reg >> 2), value);
}

static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
{
	unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
	unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);

	if (mpic->flags & MPIC_BROKEN_IPI)
		be = !be;
	return _mpic_read(be, mpic->gregs, offset);
}

static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
{
	unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);

	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
}

static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
{
	unsigned int cpu = 0;

	if (mpic->flags & MPIC_PRIMARY)
		cpu = hard_smp_processor_id();

	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg);
}

static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
{
	unsigned int cpu = 0;

	if (mpic->flags & MPIC_PRIMARY)
		cpu = hard_smp_processor_id();

	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
}
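
/* Note: non-primary (cascaded) MPICs are always accessed through the
 * per-CPU register block of CPU 0, whichever CPU is running.
 */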
static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
{
	unsigned int isu = src_no >> mpic->isu_shift;
	unsigned int idx = src_no & mpic->isu_mask;

	return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
			  reg + (idx * MPIC_IRQ_STRIDE));
}

static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
				   unsigned int reg, u32 value)
{
	unsigned int isu = src_no >> mpic->isu_shift;
	unsigned int idx = src_no & mpic->isu_mask;

	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
		    reg + (idx * MPIC_IRQ_STRIDE), value);
}

#define mpic_read(b,r)		_mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
#define mpic_write(b,r,v)	_mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
#define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v))
#define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))
#define mpic_cpu_write(i,v)	_mpic_cpu_write(mpic,(i),(v))
#define mpic_irq_read(s,r)	_mpic_irq_read(mpic,(s),(r))
#define mpic_irq_write(s,r,v)	_mpic_irq_write(mpic,(s),(r),(v))
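
/* These shorthand macros assume a local variable named "mpic" pointing
 * at the controller being accessed.
 */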

/*
 * Low level utility functions
 */

/* Check if we have one of those nice broken MPICs with a flipped endian on
 * reads from IPI registers
 */
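/* The probe below writes MPIC_VECPRI_MASK and reads it back: if the value
 * returned is the byte-swapped pattern (le32_to_cpu() of it on a big-endian
 * kernel), reads of the IPI registers come back in the wrong endianness and
 * we set MPIC_BROKEN_IPI so the IPI accessors flip it back.
 */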
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
	u32 r;

	mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
	r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);

	if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
		printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
		mpic->flags |= MPIC_BROKEN_IPI;
	}
}

#ifdef CONFIG_MPIC_BROKEN_U3

/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
 * to force the edge setting on the MPIC and do the ack workaround.
 */
static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
{
	if (source >= 128 || !mpic->fixups)
		return 0;
	return mpic->fixups[source].base != NULL;
}
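
/* EOI for an HT interrupt takes one of two paths: Apple parts provide a
 * shadow block (applebase) where writing the interrupt's bit acks it
 * directly; generic parts use the indexed access of the HT IRQCONF
 * capability (select register 0x11 + 2*index at base + 2, then rewrite
 * the saved data word at base + 4) under fixup_lock.
 */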
static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];

	if (fixup->applebase) {
		unsigned int soff = (fixup->index >> 3) & ~3;
		unsigned int mask = 1U << (fixup->index & 0x1f);

		writel(mask, fixup->applebase + soff);
	} else {
		spin_lock(&mpic->fixup_lock);
		writeb(0x11 + 2 * fixup->index, fixup->base + 2);
		writel(fixup->data, fixup->base + 4);
		spin_unlock(&mpic->fixup_lock);
	}
}

static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
				      unsigned int irqflags)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("startup_ht_interrupt(%u, %u) index: %d\n",
	    source, irqflags, fixup->index);
	spin_lock_irqsave(&mpic->fixup_lock, flags);
	/* Enable and configure */
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp &= ~(0x23U);
	if (irqflags & IRQ_LEVEL)
		tmp |= 0x22;
	writel(tmp, fixup->base + 4);
	spin_unlock_irqrestore(&mpic->fixup_lock, flags);
}

static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source,
				       unsigned int irqflags)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("shutdown_ht_interrupt(%u, %u)\n", source, irqflags);

	/* Disable */
	spin_lock_irqsave(&mpic->fixup_lock, flags);
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp |= 1;
	writel(tmp, fixup->base + 4);
	spin_unlock_irqrestore(&mpic->fixup_lock, flags);
}

static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn, u32 vdid)
{
	int i, irq, n;
	u8 __iomem *base;
	u32 tmp;
	u8 pos;

	for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
	     pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
		u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);

		if (id == PCI_CAP_ID_HT_IRQCONF) {
			id = readb(devbase + pos + 3);
			if (id == 0x80)
				break;
		}
	}
	if (pos == 0)
		return;
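
	/* From here on we use the capability's indexed access scheme: a
	 * register index written at base + 2 selects which 32-bit word
	 * appears at base + 4.
	 */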
	base = devbase + pos;
	writeb(0x01, base + 2);
	n = (readl(base + 4) >> 16) & 0xff;

	printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x"
	       " has %d irqs\n",
	       devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);

	for (i = 0; i <= n; i++) {
		writeb(0x10 + 2 * i, base + 2);
		tmp = readl(base + 4);
		irq = (tmp >> 16) & 0xff;
		DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
		/* mask it, will be unmasked later */
		tmp |= 0x1;
		writel(tmp, base + 4);
		mpic->fixups[irq].index = i;
		mpic->fixups[irq].base = base;
		/* Apple HT PIC has a non-standard way of doing EOIs */
		if ((vdid & 0xffff) == 0x106b)
			mpic->fixups[irq].applebase = devbase + 0x60;
		else
			mpic->fixups[irq].applebase = NULL;
		writeb(0x11 + 2 * i, base + 2);
		mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
	}
}

static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
	unsigned int devfn;
	u8 __iomem *cfgspace;

	printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");

	/* Allocate fixups array */
	mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
	BUG_ON(mpic->fixups == NULL);
	memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));

	/* Init spinlock */
	spin_lock_init(&mpic->fixup_lock);

	/* Map U3 config space. We assume all IO-APICs are on the primary bus
	 * so we only need to map 64kB.
	 */
	cfgspace = ioremap(0xf2000000, 0x10000);
	BUG_ON(cfgspace == NULL);

	/* Now we scan all slots. We do a very quick scan, reading only the
	 * header type, vendor ID and device ID, and that's plenty.
	 */
	for (devfn = 0; devfn < 0x100; devfn++) {
		u8 __iomem *devbase = cfgspace + (devfn << 8);
		u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
		u32 l = readl(devbase + PCI_VENDOR_ID);
		u16 s;

		DBG("devfn %x, l: %x\n", devfn, l);

		/* If no device, skip */
		if (l == 0xffffffff || l == 0x00000000 ||
		    l == 0x0000ffff || l == 0xffff0000)
			goto next;

		/* Check if it supports capability lists */
		s = readw(devbase + PCI_STATUS);
		if (!(s & PCI_STATUS_CAP_LIST))
			goto next;

		mpic_scan_ht_pic(mpic, devbase, devfn, l);

	next:
		/* next device, if function 0 */
		if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
			devfn += 7;
	}
}

#endif /* CONFIG_MPIC_BROKEN_U3 */

/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
{
	struct mpic *mpic = mpics;

	while (mpic) {
		/* search IPIs first since they may override the main interrupts */
		if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) {
			if (is_ipi)
				*is_ipi = 1;
			return mpic;
		}
		if (irq >= mpic->irq_offset &&
		    irq < (mpic->irq_offset + mpic->irq_count)) {
			if (is_ipi)
				*is_ipi = 0;
			return mpic;
		}
		mpic = mpic->next;
	}
	return NULL;
}

/* Convert a cpu mask from logical to physical cpu numbers. */
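/* For example (hypothetical mapping): if logical CPU 1 is hard CPU 2,
 * bit 1 of the incoming mask ends up as bit 2 of the result.
 */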
static inline u32 mpic_physmask(u32 cpumask)
{
	int i;
	u32 mask = 0;

	for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
	return mask;
}

#ifdef CONFIG_SMP

/* Get the mpic structure from the IPI number */
static inline struct mpic *mpic_from_ipi(unsigned int ipi)
{
	return container_of(irq_desc[ipi].chip, struct mpic, hc_ipi);
}

#endif

/* Get the mpic structure from the irq number */
static inline struct mpic *mpic_from_irq(unsigned int irq)
{
	return container_of(irq_desc[irq].chip, struct mpic, hc_irq);
}
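
/* Both lookups rely on irq_desc[].chip pointing at the hc_ipi/hc_irq
 * member embedded in struct mpic, so container_of() recovers the
 * controller.
 */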

/* Send an EOI */
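/* The dummy WHOAMI read after the EOI write flushes the posted write out
 * to the controller before we return.
 */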
static inline void mpic_eoi(struct mpic *mpic)
{
	mpic_cpu_write(MPIC_CPU_EOI, 0);
	(void)mpic_cpu_read(MPIC_CPU_WHOAMI);
}

#ifdef CONFIG_SMP
static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mpic *mpic = dev_id;

	smp_message_recv(irq - mpic->ipi_offset, regs);
	return IRQ_HANDLED;
}
#endif /* CONFIG_SMP */

/*
 * Linux descriptor level callbacks
 */

static void mpic_enable_irq(unsigned int irq)
{
	unsigned int loops = 100000;
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = irq - mpic->irq_offset;

	DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);

	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) &
		       ~MPIC_VECPRI_MASK);

	/* make sure unmask gets to controller before we return to user */
	do {
		if (!loops--) {
			printk(KERN_ERR "mpic_enable_irq timeout\n");
			break;
		}
	} while (mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);

#ifdef CONFIG_MPIC_BROKEN_U3
	if (mpic->flags & MPIC_BROKEN_U3) {
		unsigned int src = irq - mpic->irq_offset;
		if (mpic_is_ht_interrupt(mpic, src) &&
		    (irq_desc[irq].status & IRQ_LEVEL))
			mpic_ht_end_irq(mpic, src);
	}
#endif /* CONFIG_MPIC_BROKEN_U3 */
}

static unsigned int mpic_startup_irq(unsigned int irq)
{
#ifdef CONFIG_MPIC_BROKEN_U3
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = irq - mpic->irq_offset;
#endif /* CONFIG_MPIC_BROKEN_U3 */

	mpic_enable_irq(irq);

#ifdef CONFIG_MPIC_BROKEN_U3
	if (mpic_is_ht_interrupt(mpic, src))
		mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status);
#endif /* CONFIG_MPIC_BROKEN_U3 */

	return 0;
}

static void mpic_disable_irq(unsigned int irq)
{
	unsigned int loops = 100000;
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = irq - mpic->irq_offset;

	DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);

	mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
		       mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) |
		       MPIC_VECPRI_MASK);

	/* make sure mask gets to controller before we return to user */
	do {
		if (!loops--) {
			printk(KERN_ERR "mpic_disable_irq timeout\n");
			break;
		}
	} while (!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
}

static void mpic_shutdown_irq(unsigned int irq)
{
#ifdef CONFIG_MPIC_BROKEN_U3
	struct mpic *mpic = mpic_from_irq(irq);
	unsigned int src = irq - mpic->irq_offset;

	if (mpic_is_ht_interrupt(mpic, src))
		mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status);
#endif /* CONFIG_MPIC_BROKEN_U3 */

	mpic_disable_irq(irq);
}

static void mpic_end_irq(unsigned int irq)
{
	struct mpic *mpic = mpic_from_irq(irq);

#ifdef DEBUG_IRQ
	DBG("%s: end_irq: %d\n", mpic->name, irq);
#endif
	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

#ifdef CONFIG_MPIC_BROKEN_U3
	if (mpic->flags & MPIC_BROKEN_U3) {
		unsigned int src = irq - mpic->irq_offset;
		if (mpic_is_ht_interrupt(mpic, src) &&
		    (irq_desc[irq].status & IRQ_LEVEL))
			mpic_ht_end_irq(mpic, src);
	}
#endif /* CONFIG_MPIC_BROKEN_U3 */

	mpic_eoi(mpic);
}

#ifdef CONFIG_SMP

static void mpic_enable_ipi(unsigned int irq)
{
	struct mpic *mpic = mpic_from_ipi(irq);
	unsigned int src = irq - mpic->ipi_offset;

	DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
	mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}

static void mpic_disable_ipi(unsigned int irq)
{
	/* NEVER disable an IPI... that's just plain wrong! */
}

static void mpic_end_ipi(unsigned int irq)
{
	struct mpic *mpic = mpic_from_ipi(irq);

	/*
	 * IPIs are marked IRQ_PER_CPU. This has the side effect of
	 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
	 * applying to them. We EOI them late to avoid re-entering.
	 * We mark IPIs with IRQF_DISABLED as they must run with
	 * irqs disabled.
	 */
	mpic_eoi(mpic);
}

#endif /* CONFIG_SMP */

static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
	struct mpic *mpic = mpic_from_irq(irq);
	cpumask_t tmp;

	cpus_and(tmp, cpumask, cpu_online_map);

	mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION,
		       mpic_physmask(cpus_addr(tmp)[0]));
}

/*
 * Exported functions
 */

struct mpic * __init mpic_alloc(unsigned long phys_addr,
				unsigned int flags,
				unsigned int isu_size,
				unsigned int irq_offset,
				unsigned int irq_count,
				unsigned int ipi_offset,
				unsigned char *senses,
				unsigned int senses_count,
				const char *name)
{
	struct mpic *mpic;
	u32 reg;
	const char *vers;
	int i;

	mpic = alloc_bootmem(sizeof(struct mpic));
	if (mpic == NULL)
		return NULL;

	memset(mpic, 0, sizeof(struct mpic));
	mpic->name = name;

	mpic->hc_irq.typename = name;
	mpic->hc_irq.startup = mpic_startup_irq;
	mpic->hc_irq.shutdown = mpic_shutdown_irq;
	mpic->hc_irq.enable = mpic_enable_irq;
	mpic->hc_irq.disable = mpic_disable_irq;
	mpic->hc_irq.end = mpic_end_irq;
	if (flags & MPIC_PRIMARY)
		mpic->hc_irq.set_affinity = mpic_set_affinity;
#ifdef CONFIG_SMP
	mpic->hc_ipi.typename = name;
	mpic->hc_ipi.enable = mpic_enable_ipi;
	mpic->hc_ipi.disable = mpic_disable_ipi;
	mpic->hc_ipi.end = mpic_end_ipi;
#endif /* CONFIG_SMP */

	mpic->flags = flags;
	mpic->isu_size = isu_size;
	mpic->irq_offset = irq_offset;
	mpic->irq_count = irq_count;
	mpic->ipi_offset = ipi_offset;
	mpic->num_sources = 0; /* so far */
	mpic->senses = senses;
	mpic->senses_count = senses_count;

	/* Map the global registers */
	mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
	mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
	BUG_ON(mpic->gregs == NULL);

	/* Reset */
	if (flags & MPIC_WANTS_RESET) {
		mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
			   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
			   | MPIC_GREG_GCONF_RESET);
		while (mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
		       & MPIC_GREG_GCONF_RESET)
			mb();
	}

	/* Read feature register, calculate num CPUs and, for non-ISU
	 * MPICs, num sources as well. On ISU MPICs, sources are counted
	 * as ISUs are added
	 */
	reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
	mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
			  >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
	if (isu_size == 0)
		mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
				     >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;

	/* Map the per-CPU registers */
	for (i = 0; i < mpic->num_cpus; i++) {
		mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
					   i * MPIC_CPU_STRIDE, 0x1000);
		BUG_ON(mpic->cpuregs[i] == NULL);
	}

	/* Initialize main ISU if none provided */
	if (mpic->isu_size == 0) {
		mpic->isu_size = mpic->num_sources;
		mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
					MPIC_IRQ_STRIDE * mpic->isu_size);
		BUG_ON(mpic->isus[0] == NULL);
	}
	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
	mpic->isu_mask = (1 << mpic->isu_shift) - 1;
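
	/* isu_shift is ceil(log2(isu_size)), so for the usual power-of-two
	 * ISU sizes a source number decomposes as (src_no >> isu_shift)
	 * selecting the ISU and (src_no & isu_mask) indexing within it,
	 * matching the _mpic_irq_read/write accessors above.
	 */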

	/* Display version */
	switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
	case 1:
		vers = "1.0";
		break;
	case 2:
		vers = "1.2";
		break;
	case 3:
		vers = "1.3";
		break;
	default:
		vers = "<unknown>";
		break;
	}
	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
	       name, vers, phys_addr, mpic->num_cpus);
	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
	       mpic->isu_size, mpic->isu_shift, mpic->isu_mask);

	mpic->next = mpics;
	mpics = mpic;

	if (flags & MPIC_PRIMARY)
		mpic_primary = mpic;

	return mpic;
}
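
/* A typical boot-time sequence (argument values hypothetical, for
 * illustration only):
 *
 *	mpic = mpic_alloc(paddr, MPIC_PRIMARY | MPIC_WANTS_RESET,
 *			  0, 0, 128, 124, NULL, 0, " MPIC ");
 *	mpic_init(mpic);
 *
 * Platforms with external ISUs pass a non-zero isu_size and add
 * mpic_assign_isu() calls before mpic_init().
 */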

void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
			    unsigned long phys_addr)
{
	unsigned int isu_first = isu_num * mpic->isu_size;

	BUG_ON(isu_num >= MPIC_MAX_ISU);

	mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
	if ((isu_first + mpic->isu_size) > mpic->num_sources)
		mpic->num_sources = isu_first + mpic->isu_size;
}

void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
			       void *data)
{
	struct mpic *mpic = mpic_find(irq, NULL);
	unsigned long flags;

	/* Synchronization here is a bit dodgy, so don't try to replace cascade
	 * interrupts on the fly too often ... but normally it's set up at boot.
	 */
	spin_lock_irqsave(&mpic_lock, flags);
	if (mpic->cascade)
		mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
	mpic->cascade = NULL;
	wmb();
	mpic->cascade_vec = irq - mpic->irq_offset;
	mpic->cascade_data = data;
	wmb();
	mpic->cascade = handler;
	mpic_enable_irq(irq);
	spin_unlock_irqrestore(&mpic_lock, flags);
}

void __init mpic_init(struct mpic *mpic)
{
	int i;

	BUG_ON(mpic->num_sources == 0);

	printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

	/* Initialize timers: just disable them all */
	for (i = 0; i < 4; i++) {
		mpic_write(mpic->tmregs,
			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
		mpic_write(mpic->tmregs,
			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
			   MPIC_VECPRI_MASK |
			   (MPIC_VEC_TIMER_0 + i));
	}

	/* Initialize IPIs to our reserved vectors and mark them disabled for now */
	mpic_test_broken_ipi(mpic);
	for (i = 0; i < 4; i++) {
		mpic_ipi_write(i,
			       MPIC_VECPRI_MASK |
			       (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
			       (MPIC_VEC_IPI_0 + i));
#ifdef CONFIG_SMP
		if (!(mpic->flags & MPIC_PRIMARY))
			continue;
		irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
		irq_desc[mpic->ipi_offset+i].chip = &mpic->hc_ipi;
#endif /* CONFIG_SMP */
	}

	/* Initialize interrupt sources */
	if (mpic->irq_count == 0)
		mpic->irq_count = mpic->num_sources;

#ifdef CONFIG_MPIC_BROKEN_U3
	/* Do the HT PIC fixups on U3 broken mpic */
	DBG("MPIC flags: %x\n", mpic->flags);
	if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
		mpic_scan_ht_pics(mpic);
#endif /* CONFIG_MPIC_BROKEN_U3 */

	for (i = 0; i < mpic->num_sources; i++) {
		/* start with vector = source number, and masked */
		u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
		int level = 0;

		/* if its linux irq lands in the IPI range, we skip it */
		if ((mpic->irq_offset + i) >= mpic->ipi_offset &&
		    (mpic->irq_offset + i) < (mpic->ipi_offset + 4))
			continue;

		/* do senses munging */
		if (mpic->senses && i < mpic->senses_count) {
			if (mpic->senses[i] & IRQ_SENSE_LEVEL)
				vecpri |= MPIC_VECPRI_SENSE_LEVEL;
			if (mpic->senses[i] & IRQ_POLARITY_POSITIVE)
				vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
		} else
			vecpri |= MPIC_VECPRI_SENSE_LEVEL;

		/* remember if it was a level interrupt */
		level = (vecpri & MPIC_VECPRI_SENSE_LEVEL);

		/* deal with broken U3 */
		if (mpic->flags & MPIC_BROKEN_U3) {
#ifdef CONFIG_MPIC_BROKEN_U3
			if (mpic_is_ht_interrupt(mpic, i)) {
				vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
					    MPIC_VECPRI_POLARITY_MASK);
				vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
			}
#else
			printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n");
#endif
		}

		DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri,
		    (level != 0));

		/* init hw */
		mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
			       1 << hard_smp_processor_id());

		/* init linux descriptors */
		if (i < mpic->irq_count) {
			irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
			irq_desc[mpic->irq_offset+i].chip = &mpic->hc_irq;
		}
	}

	/* Init spurious vector */
	mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);

	/* Disable 8259 passthrough */
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
		   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
		   | MPIC_GREG_GCONF_8259_PTHROU_DIS);

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
}

void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
{
	u32 v;

	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
	v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
	v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
}

void __init mpic_set_serial_int(struct mpic *mpic, int enable)
{
	u32 v;

	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
	if (enable)
		v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
	else
		v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
}

void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
	unsigned int is_ipi;
	struct mpic *mpic = mpic_find(irq, &is_ipi);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&mpic_lock, flags);
	if (is_ipi) {
		reg = mpic_ipi_read(irq - mpic->ipi_offset) &
			~MPIC_VECPRI_PRIORITY_MASK;
		mpic_ipi_write(irq - mpic->ipi_offset,
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	} else {
		reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI)
			& ~MPIC_VECPRI_PRIORITY_MASK;
		mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	}
	spin_unlock_irqrestore(&mpic_lock, flags);
}

unsigned int mpic_irq_get_priority(unsigned int irq)
{
	unsigned int is_ipi;
	struct mpic *mpic = mpic_find(irq, &is_ipi);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&mpic_lock, flags);
	if (is_ipi)
		reg = mpic_ipi_read(irq - mpic->ipi_offset);
	else
		reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI);
	spin_unlock_irqrestore(&mpic_lock, flags);
	return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
}

void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we want intrs. default affinity is 0xffffffff
	 * until changed via /proc. That's how it's done on x86. If we want
	 * it differently, then we should make sure we also change the default
	 * values of irq_desc[].affinity in irq.c.
	 */
	if (distribute_irqs) {
		for (i = 0; i < mpic->num_sources; i++)
			mpic_irq_write(i, MPIC_IRQ_DESTINATION,
				       mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
	}

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);

	spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}

int mpic_cpu_get_priority(void)
{
	struct mpic *mpic = mpic_primary;

	return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
}

void mpic_cpu_set_priority(int prio)
{
	struct mpic *mpic = mpic_primary;

	prio &= MPIC_CPU_TASKPRI_MASK;
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
}

/*
 * XXX: someone who knows mpic should check this.
 * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
 * or can we reset the mpic in the new kernel?
 */
void mpic_teardown_this_cpu(int secondary)
{
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we don't want intrs. */
	for (i = 0; i < mpic->num_sources; i++)
		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
			       mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

	spin_unlock_irqrestore(&mpic_lock, flags);
}

void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

#ifdef DEBUG_IPI
	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
#endif

	mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
		       mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
}
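
/* Interrupt decoding below relies on the vector layout programmed in
 * mpic_init(): normal sources get vector == source number (all below
 * MPIC_VEC_IPI_0), IPIs get MPIC_VEC_IPI_0..MPIC_VEC_IPI_0 + 3, and
 * anything reading back as MPIC_VEC_SPURRIOUS is dropped. The cascade
 * vector, if any, is checked first and delegated to the cascade handler.
 */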
int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
{
	u32 irq;

	irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
#ifdef DEBUG_LOW
	DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
#endif
	if (mpic->cascade && irq == mpic->cascade_vec) {
#ifdef DEBUG_LOW
		DBG("%s: cascading ...\n", mpic->name);
#endif
		irq = mpic->cascade(regs, mpic->cascade_data);
		mpic_eoi(mpic);
		return irq;
	}
	if (unlikely(irq == MPIC_VEC_SPURRIOUS))
		return -1;
	if (irq < MPIC_VEC_IPI_0) {
#ifdef DEBUG_IRQ
		DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset);
#endif
		return irq + mpic->irq_offset;
	}
#ifdef DEBUG_IPI
	DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
#endif
	return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
}

int mpic_get_irq(struct pt_regs *regs)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return mpic_get_one_irq(mpic, regs);
}

#ifdef CONFIG_SMP
void mpic_request_ipis(void)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	printk("requesting IPIs ... \n");

	/*
	 * IPIs are marked IRQF_DISABLED as they must run with irqs
	 * disabled
	 */
	request_irq(mpic->ipi_offset+0, mpic_ipi_action, IRQF_DISABLED,
		    "IPI0 (call function)", mpic);
	request_irq(mpic->ipi_offset+1, mpic_ipi_action, IRQF_DISABLED,
		    "IPI1 (reschedule)", mpic);
	request_irq(mpic->ipi_offset+2, mpic_ipi_action, IRQF_DISABLED,
		    "IPI2 (unused)", mpic);
	request_irq(mpic->ipi_offset+3, mpic_ipi_action, IRQF_DISABLED,
		    "IPI3 (debugger break)", mpic);

	printk("IPIs requested... \n");
}

void smp_mpic_message_pass(int target, int msg)
{
	/* make sure we're sending something that translates to an IPI */
	if ((unsigned int)msg > 3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}
	switch (target) {
	case MSG_ALL:
		mpic_send_ipi(msg, 0xffffffff);
		break;
	case MSG_ALL_BUT_SELF:
		mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
		break;
	default:
		mpic_send_ipi(msg, 1 << target);
		break;
	}
}
#endif /* CONFIG_SMP */