/*
 * irq-gic.c: Interrupt handling for the MIPS Global Interrupt
 * Controller (GIC) as found on Malta/MT platforms.
 */
  1. #undef DEBUG
  2. #include <linux/bitmap.h>
  3. #include <linux/init.h>
  4. #include <asm/io.h>
  5. #include <asm/gic.h>
  6. #include <asm/gcmpregs.h>
  7. #include <asm/mips-boards/maltaint.h>
  8. #include <asm/irq.h>
  9. #include <linux/hardirq.h>
  10. #include <asm-generic/bitops/find.h>
/* Virtual base address of the GIC's memory-mapped register block */
static unsigned long _gic_base;
/* First Linux IRQ handled by the GIC, interrupt-map size, probed counts */
static unsigned int _irqbase, _mapsize, numvpes, numintrs;
/* Board-supplied table describing each GIC interrupt's routing */
static struct gic_intr_map *_intrmap;

/* Per-CPU software routing masks and shadow copies of the pending/mask
 * register banks, filled in by gic_get_int() */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];

#define gic_wedgeb2bok 0 /*
			  * Can GIC handle b2b writes to wedge register?
			  */
#if gic_wedgeb2bok == 0
/* Serialises back-to-back WEDGE register writes (see gic_send_ipi) */
static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
#endif
/*
 * Send an inter-processor interrupt by writing bit @intr to the GIC
 * WEDGE register with bit 31 set ("assert edge").
 *
 * When the GIC cannot cope with back-to-back wedge writes
 * (gic_wedgeb2bok == 0), the write is serialised under
 * gic_wedgeb2b_lock and followed by a read of GIC_SH_CONFIG to push
 * the write out to the device before the lock is dropped.
 */
void gic_send_ipi(unsigned int intr)
{
#if gic_wedgeb2bok == 0
	unsigned long flags;
#endif

	pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
		read_c0_status());
	if (!gic_wedgeb2bok)
		spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
	GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
	if (!gic_wedgeb2bok) {
		/* Read back a shared register to flush the wedge write */
		(void) GIC_REG(SHARED, GIC_SH_CONFIG);
		spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
	}
}
/* This is Malta specific and needs to be exported */
/*
 * Route each VPE's local timer and performance-counter interrupts to
 * GIC pin 5, for every VPE that reports them as locally routable.
 * Per-VPE registers are reached through the VPE_OTHER alias window,
 * selected by writing the target VPE number to GIC_VPE_OTHER_ADDR.
 */
static void vpe_local_setup(unsigned int numvpes)
{
	int i;
	unsigned long timer_interrupt = 5, perf_interrupt = 5;
	unsigned int vpe_ctl;

	/*
	 * Setup the default performance counter timer interrupts
	 * for all VPEs
	 */
	for (i = 0; i < numvpes; i++) {
		/* Select VPE i through the "other VPE" addressing window */
		GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		/* Are Interrupts locally routable? */
		GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
		if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				GIC_MAP_TO_PIN_MSK | timer_interrupt);
		if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				GIC_MAP_TO_PIN_MSK | perf_interrupt);
	}
}
/*
 * Return the lowest-numbered GIC interrupt that is pending, unmasked
 * in hardware, and routed to the calling CPU by the software
 * pcpu_mask.  Returns GIC_NUM_INTRS when nothing qualifies
 * (find_first_bit's "not found" value).
 */
unsigned int gic_get_int(void)
{
	unsigned int i;
	unsigned long *pending, *intrmask, *pcpu_mask;
	unsigned long *pending_abs, *intrmask_abs;

	/* Get per-cpu bitmaps */
	pending = pending_regs[smp_processor_id()].pending;
	intrmask = intrmask_regs[smp_processor_id()].intrmask;
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
							GIC_SH_PEND_31_0_OFS);
	intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
							GIC_SH_MASK_31_0_OFS);

	/* Copy the hardware PENDING/MASK banks word by word into the
	 * per-CPU shadow bitmaps */
	for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
		GICREAD(*pending_abs, pending[i]);
		GICREAD(*intrmask_abs, intrmask[i]);
		pending_abs++;
		intrmask_abs++;
	}

	/* pending &= hardware mask, then &= this CPU's routing mask */
	bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
	bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);

	i = find_first_bit(pending, GIC_NUM_INTRS);

	pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);

	return i;
}
/*
 * irq_chip .startup handler: enable the line by setting its bit in the
 * shared SMASK (set-mask) register bank.  Always reports success (0).
 */
static unsigned int gic_irq_startup(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	/* Convert the Linux IRQ number to a GIC interrupt number */
	irq -= _irqbase;

	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
		1 << (irq % 32));
	return 0;
}
/*
 * irq_chip .ack handler: mask the line via the RMASK (reset-mask)
 * bank.  For edge-triggered sources, also clear the latched edge by
 * writing the interrupt number (bit 31 clear = "deassert") to the
 * WEDGE register, serialised under gic_wedgeb2b_lock when
 * back-to-back wedge writes are unsafe.
 */
static void gic_irq_ack(unsigned int irq)
{
#if gic_wedgeb2bok == 0
	unsigned long flags;
#endif

	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
		1 << (irq % 32));

	if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
		if (!gic_wedgeb2bok)
			spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
		GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
		if (!gic_wedgeb2bok) {
			/* Read back to flush the wedge write */
			(void) GIC_REG(SHARED, GIC_SH_CONFIG);
			spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
		}
	}
}
/*
 * irq_chip .mask handler: disable the line by setting its bit in the
 * shared RMASK (reset-mask) register bank.
 */
static void gic_mask_irq(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
		1 << (irq % 32));
}
/*
 * irq_chip .unmask handler: re-enable the line by setting its bit in
 * the shared SMASK (set-mask) register bank.
 */
static void gic_unmask_irq(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
		1 << (irq % 32));
}
  129. #ifdef CONFIG_SMP
  130. static DEFINE_SPINLOCK(gic_lock);
  131. static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
  132. {
  133. cpumask_t tmp = CPU_MASK_NONE;
  134. unsigned long flags;
  135. int i;
  136. pr_debug(KERN_DEBUG "%s called\n", __func__);
  137. irq -= _irqbase;
  138. cpumask_and(&tmp, cpumask, cpu_online_mask);
  139. if (cpus_empty(tmp))
  140. return;
  141. /* Assumption : cpumask refers to a single CPU */
  142. spin_lock_irqsave(&gic_lock, flags);
  143. for (;;) {
  144. /* Re-route this IRQ */
  145. GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
  146. /*
  147. * FIXME: assumption that _intrmap is ordered and has no holes
  148. */
  149. /* Update the intr_map */
  150. _intrmap[irq].cpunum = first_cpu(tmp);
  151. /* Update the pcpu_masks */
  152. for (i = 0; i < NR_CPUS; i++)
  153. clear_bit(irq, pcpu_masks[i].pcpu_mask);
  154. set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
  155. }
  156. cpumask_copy(irq_desc[irq].affinity, cpumask);
  157. spin_unlock_irqrestore(&gic_lock, flags);
  158. }
  159. #endif
/* irq_chip driving every interrupt line owned by the GIC.  Note that
 * .mask_ack only masks (edge clearing is done in .ack) and .eoi simply
 * unmasks the line. */
static struct irq_chip gic_irq_controller = {
	.name = "MIPS GIC",
	.startup = gic_irq_startup,
	.ack = gic_irq_ack,
	.mask = gic_mask_irq,
	.mask_ack = gic_mask_irq,
	.unmask = gic_unmask_irq,
	.eoi = gic_unmask_irq,
#ifdef CONFIG_SMP
	.set_affinity = gic_set_affinity,
#endif
};
/*
 * Program one shared GIC interrupt from a gic_intr_map entry:
 * map it to an output pin (or to NMI on all CPUs), route it to @cpu,
 * set its polarity and trigger type, and leave it masked.
 *
 * @intr:     GIC interrupt number
 * @cpu:      destination CPU/VPE (ignored for NMI routing)
 * @pin:      output pin, possibly carrying GIC_MAP_TO_NMI_MSK
 * @polarity: GIC_POL_* active level/edge polarity
 * @trigtype: GIC_TRIG_* level or edge trigger
 */
static void __init setup_intr(unsigned int intr, unsigned int cpu,
	unsigned int pin, unsigned int polarity, unsigned int trigtype)
{
	/* Setup Intr to Pin mapping */
	if (pin & GIC_MAP_TO_NMI_MSK) {
		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
		/* FIXME: hack to route NMI to all cpu's */
		for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
			GICWRITE(GIC_REG_ADDR(SHARED,
					GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
				0xffffffff);
		}
	} else {
		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
			GIC_MAP_TO_PIN_MSK | pin);
		/* Setup Intr to CPU mapping */
		GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
	}

	/* Setup Intr Polarity */
	GIC_SET_POLARITY(intr, polarity);
	/* Setup Intr Trigger Type */
	GIC_SET_TRIGGER(intr, trigtype);
	/* Init Intr Masks */
	GIC_SET_INTR_MASK(intr, 0);
}
/*
 * One-time GIC bring-up: put every interrupt into a safe default
 * state (active-high, level-triggered, masked), apply the
 * board-specific _intrmap entries, program per-VPE local interrupts,
 * and attach gic_irq_controller to the GIC's Linux IRQ range.
 */
static void __init gic_basic_init(void)
{
	unsigned int i, cpu;

	/* Setup defaults */
	for (i = 0; i < GIC_NUM_INTRS; i++) {
		GIC_SET_POLARITY(i, GIC_POL_POS);
		GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
		GIC_SET_INTR_MASK(i, 0);
	}

	/* Setup specifics */
	for (i = 0; i < _mapsize; i++) {
		cpu = _intrmap[i].cpunum;
		/* X presumably marks an unused map entry — skip it
		 * (NOTE(review): confirm against the X definition in gic.h) */
		if (cpu == X)
			continue;
		setup_intr(_intrmap[i].intrnum,
			_intrmap[i].cpunum,
			_intrmap[i].pin,
			_intrmap[i].polarity,
			_intrmap[i].trigtype);
		/* Initialise per-cpu Interrupt software masks */
		if (_intrmap[i].ipiflag)
			set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
	}

	vpe_local_setup(numvpes);

	/* Hand every GIC IRQ in our range to the GIC irq_chip */
	for (i = _irqbase; i < (_irqbase + numintrs); i++)
		set_irq_chip(i, &gic_irq_controller);
}
/*
 * Map the GIC register space and initialise the controller.
 *
 * @gic_base_addr:      physical base of the GIC register block
 * @gic_addrspace_size: size of that register block
 * @intr_map:           board interrupt routing table (kept by reference,
 *                      must stay valid after init)
 * @intr_map_size:      number of entries in @intr_map
 * @irqbase:            first Linux IRQ number assigned to the GIC
 *
 * The number of supported interrupts and VPEs is probed from the
 * GIC_SH_CONFIG register (the NUMINTRS field encodes interrupts in
 * units of 8, hence the (n + 1) * 8 conversion).
 */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     struct gic_intr_map *intr_map, unsigned int intr_map_size,
		     unsigned int irqbase)
{
	unsigned int gicconfig;

	_gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
						    gic_addrspace_size);
	_irqbase = irqbase;
	_intrmap = intr_map;
	_mapsize = intr_map_size;

	GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
	numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	numintrs = ((numintrs + 1) * 8);

	numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;

	pr_debug("%s called\n", __func__);

	gic_basic_init();
}