/*
 * irq-gic.c — MIPS Global Interrupt Controller (GIC) IRQ support.
 */
  1. #undef DEBUG
  2. #include <linux/bitmap.h>
  3. #include <linux/init.h>
  4. #include <linux/smp.h>
  5. #include <asm/io.h>
  6. #include <asm/gic.h>
  7. #include <asm/gcmpregs.h>
  8. #include <asm/mips-boards/maltaint.h>
  9. #include <asm/irq.h>
  10. #include <linux/hardirq.h>
  11. #include <asm-generic/bitops/find.h>
/* Virtual base address of the GIC register block (set in gic_init()). */
static unsigned long _gic_base;
/* First Linux IRQ handled by the GIC, routing-map size, and the VPE /
 * interrupt counts probed from GIC_SH_CONFIG. */
static unsigned int _irqbase, _mapsize, numvpes, numintrs;
/* Board-supplied table describing how each GIC interrupt is routed. */
static struct gic_intr_map *_intrmap;

/* Per-CPU software routing masks and cached pending/mask snapshots. */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];

#define gic_wedgeb2bok 0	/*
				 * Can GIC handle b2b writes to wedge register?
				 */
#if gic_wedgeb2bok == 0
/* Serialises WEDGE writes when back-to-back writes are unsafe. */
static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
#endif
/*
 * Raise inter-processor interrupt @intr by writing the GIC WEDGE
 * register; bit 31 set means "assert the interrupt".
 *
 * When the GIC cannot take back-to-back wedge writes
 * (gic_wedgeb2bok == 0), writers are serialised with a spinlock and
 * the write is pushed out with a dummy read of GIC_SH_CONFIG.
 */
void gic_send_ipi(unsigned int intr)
{
#if gic_wedgeb2bok == 0
	unsigned long flags;
#endif

	pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
		 read_c0_status());
	if (!gic_wedgeb2bok)
		spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
	GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
	if (!gic_wedgeb2bok) {
		/* Dummy read: forces the wedge write to complete */
		(void) GIC_REG(SHARED, GIC_SH_CONFIG);
		spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
	}
}
/* This is Malta specific and needs to be exported */
/*
 * Route each VPE's local timer and performance-counter interrupts to
 * pin 5, but only where GIC_VPE_CTL reports the source as locally
 * routable.  Each VPE is addressed through the VPE_OTHER alias
 * window selected via GIC_VPE_OTHER_ADDR.
 */
static void vpe_local_setup(unsigned int numvpes)
{
	int i;
	unsigned long timer_interrupt = 5, perf_interrupt = 5;
	unsigned int vpe_ctl;

	/*
	 * Setup the default performance counter timer interrupts
	 * for all VPEs
	 */
	for (i = 0; i < numvpes; i++) {
		/* Point the VPE_OTHER window at VPE i */
		GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		/* Are Interrupts locally routable? */
		GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
		if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				 GIC_MAP_TO_PIN_MSK | timer_interrupt);

		if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
			GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				 GIC_MAP_TO_PIN_MSK | perf_interrupt);
	}
}
/*
 * Decode the pending interrupt for the calling CPU.
 *
 * Snapshots the shared PEND and MASK registers into per-cpu bitmaps,
 * ANDs them with this CPU's software pcpu_mask, and returns the index
 * of the first set bit (find_first_bit() returns GIC_NUM_INTRS when
 * nothing is pending).
 */
unsigned int gic_get_int(void)
{
	unsigned int i;
	unsigned long *pending, *intrmask, *pcpu_mask;
	unsigned long *pending_abs, *intrmask_abs;

	/* Get per-cpu bitmaps */
	pending = pending_regs[smp_processor_id()].pending;
	intrmask = intrmask_regs[smp_processor_id()].intrmask;
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
							 GIC_SH_PEND_31_0_OFS);
	intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
							  GIC_SH_MASK_31_0_OFS);

	/* Copy the hardware pending/mask words into the local bitmaps */
	for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
		GICREAD(*pending_abs, pending[i]);
		GICREAD(*intrmask_abs, intrmask[i]);
		pending_abs++;
		intrmask_abs++;
	}

	/* Keep only sources that are unmasked and routed to this CPU */
	bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
	bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);

	i = find_first_bit(pending, GIC_NUM_INTRS);

	pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);

	return i;
}
/*
 * irq_chip .startup hook: enable @irq by setting its bit in the
 * shared SMASK (set-mask) registers.  Always returns 0.
 */
static unsigned int gic_irq_startup(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;	/* Linux IRQ -> GIC interrupt number */
	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
	/* NOTE(review): (irq / 32) looks like it should scale by the
	 * register stride for irq >= 32 — confirm against GIC_REG_ADDR. */
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
		 1 << (irq % 32));
	return 0;
}
/*
 * irq_chip .ack hook: mask @irq via the RMASK (reset-mask) registers
 * and, for edge-triggered sources, clear the latched edge by writing
 * the interrupt number (bit 31 clear) to the WEDGE register.  Wedge
 * writes use the same back-to-back workaround as gic_send_ipi().
 */
static void gic_irq_ack(unsigned int irq)
{
#if gic_wedgeb2bok == 0
	unsigned long flags;
#endif

	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
		 1 << (irq % 32));

	if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
		if (!gic_wedgeb2bok)
			spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
		GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
		if (!gic_wedgeb2bok) {
			/* Dummy read: forces the wedge write to complete */
			(void) GIC_REG(SHARED, GIC_SH_CONFIG);
			spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
		}
	}
}
/*
 * irq_chip .mask hook: disable @irq by setting its bit in the shared
 * RMASK (reset-mask) registers.
 */
static void gic_mask_irq(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;	/* Linux IRQ -> GIC interrupt number */
	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
		 1 << (irq % 32));
}
/*
 * irq_chip .unmask/.eoi hook: re-enable @irq by setting its bit in
 * the shared SMASK (set-mask) registers.
 */
static void gic_unmask_irq(unsigned int irq)
{
	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;	/* Linux IRQ -> GIC interrupt number */
	/* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
	GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
		 1 << (irq % 32));
}
  130. #ifdef CONFIG_SMP
  131. static DEFINE_SPINLOCK(gic_lock);
  132. static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
  133. {
  134. cpumask_t tmp = CPU_MASK_NONE;
  135. unsigned long flags;
  136. int i;
  137. pr_debug(KERN_DEBUG "%s called\n", __func__);
  138. irq -= _irqbase;
  139. cpumask_and(&tmp, cpumask, cpu_online_mask);
  140. if (cpus_empty(tmp))
  141. return -1;
  142. /* Assumption : cpumask refers to a single CPU */
  143. spin_lock_irqsave(&gic_lock, flags);
  144. for (;;) {
  145. /* Re-route this IRQ */
  146. GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
  147. /*
  148. * FIXME: assumption that _intrmap is ordered and has no holes
  149. */
  150. /* Update the intr_map */
  151. _intrmap[irq].cpunum = first_cpu(tmp);
  152. /* Update the pcpu_masks */
  153. for (i = 0; i < NR_CPUS; i++)
  154. clear_bit(irq, pcpu_masks[i].pcpu_mask);
  155. set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
  156. }
  157. cpumask_copy(irq_desc[irq].affinity, cpumask);
  158. spin_unlock_irqrestore(&gic_lock, flags);
  159. return 0;
  160. }
  161. #endif
/*
 * irq_chip for GIC-routed interrupts.  mask_ack only masks — the
 * edge-clearing wedge write is performed by gic_irq_ack(); eoi
 * re-enables the source after handling.
 */
static struct irq_chip gic_irq_controller = {
	.name		=	"MIPS GIC",
	.startup	=	gic_irq_startup,
	.ack		=	gic_irq_ack,
	.mask		=	gic_mask_irq,
	.mask_ack	=	gic_mask_irq,
	.unmask		=	gic_unmask_irq,
	.eoi		=	gic_unmask_irq,
#ifdef CONFIG_SMP
	.set_affinity	=	gic_set_affinity,
#endif
};
/*
 * Program routing for one shared interrupt: map @intr to output
 * @pin on VPE @cpu, set its @polarity and @trigtype, and leave the
 * source masked.  An NMI mapping (@pin has GIC_MAP_TO_NMI_MSK set)
 * is broadcast to every CPU instead of a single VPE.
 */
static void __init setup_intr(unsigned int intr, unsigned int cpu,
	unsigned int pin, unsigned int polarity, unsigned int trigtype)
{
	/* Setup Intr to Pin mapping */
	if (pin & GIC_MAP_TO_NMI_MSK) {
		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
		/* FIXME: hack to route NMI to all cpu's */
		/* One 32-bit VPE-map register covers 32 CPUs */
		for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
			GICWRITE(GIC_REG_ADDR(SHARED,
					GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
				 0xffffffff);
		}
	} else {
		GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
			 GIC_MAP_TO_PIN_MSK | pin);
		/* Setup Intr to CPU mapping */
		GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
	}

	/* Setup Intr Polarity */
	GIC_SET_POLARITY(intr, polarity);
	/* Setup Intr Trigger Type */
	GIC_SET_TRIGGER(intr, trigtype);
	/* Init Intr Masks: start disabled; startup/unmask enables later */
	GIC_SET_INTR_MASK(intr, 0);
}
/*
 * Reset every GIC source to a safe default (positive polarity, level
 * trigger, masked), apply the board's routing table, program per-VPE
 * local interrupts, and register the irq_chip for the GIC's Linux
 * IRQ range [_irqbase, _irqbase + numintrs).
 */
static void __init gic_basic_init(void)
{
	unsigned int i, cpu;

	/* Setup defaults */
	for (i = 0; i < GIC_NUM_INTRS; i++) {
		GIC_SET_POLARITY(i, GIC_POL_POS);
		GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
		GIC_SET_INTR_MASK(i, 0);
	}

	/* Setup specifics */
	for (i = 0; i < _mapsize; i++) {
		cpu = _intrmap[i].cpunum;
		/* NOTE(review): X appears to be a "slot unused" sentinel —
		 * confirm against the gic_intr_map definition in gic.h. */
		if (cpu == X)
			continue;
		setup_intr(_intrmap[i].intrnum,
			   _intrmap[i].cpunum,
			   _intrmap[i].pin,
			   _intrmap[i].polarity,
			   _intrmap[i].trigtype);
		/* Initialise per-cpu Interrupt software masks */
		if (_intrmap[i].ipiflag)
			set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
	}

	vpe_local_setup(numvpes);

	for (i = _irqbase; i < (_irqbase + numintrs); i++)
		set_irq_chip(i, &gic_irq_controller);
}
/*
 * Map the GIC register space and bring the controller up.
 *
 * @gic_base_addr:      physical base address of the GIC register block
 * @gic_addrspace_size: size of that register block
 * @intr_map:           board routing table (pointer is kept, not copied)
 * @intr_map_size:      number of entries in @intr_map
 * @irqbase:            first Linux IRQ number assigned to the GIC
 *
 * The interrupt and VPE counts are read back from GIC_SH_CONFIG;
 * the NUMINTRS field encodes the count as (N/8)-1, hence the
 * (numintrs + 1) * 8 expansion below.
 */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     struct gic_intr_map *intr_map, unsigned int intr_map_size,
		     unsigned int irqbase)
{
	unsigned int gicconfig;

	_gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
						    gic_addrspace_size);
	_irqbase = irqbase;
	_intrmap = intr_map;
	_mapsize = intr_map_size;

	GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
	numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	numintrs = ((numintrs + 1) * 8);	/* decode (N/8)-1 encoding */

	numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;

	pr_debug("%s called\n", __func__);

	gic_basic_init();
}