/*
 * irq-gic.c: interrupt support for the MIPS Global Interrupt Controller (GIC).
 */
  1. #undef DEBUG
  2. #include <linux/bitmap.h>
  3. #include <linux/init.h>
  4. #include <linux/smp.h>
  5. #include <asm/io.h>
  6. #include <asm/gic.h>
  7. #include <asm/gcmpregs.h>
  8. #include <asm/mips-boards/maltaint.h>
  9. #include <asm/irq.h>
  10. #include <linux/hardirq.h>
  11. #include <asm-generic/bitops/find.h>
/* Virtual base address of the mapped GIC register block (set in gic_init()). */
static unsigned long _gic_base;
/* First Linux IRQ number used for GIC sources, routing-map size, and the
 * interrupt/VPE counts probed from GIC_SH_CONFIG. */
static unsigned int _irqbase, _mapsize, numvpes, numintrs;
/* Board-supplied routing table; one entry per GIC interrupt source. */
static struct gic_intr_map *_intrmap;

/* Per-CPU software routing masks and register snapshot buffers. */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static struct gic_pending_regs pending_regs[NR_CPUS];
static struct gic_intrmask_regs intrmask_regs[NR_CPUS];

#define gic_wedgeb2bok 0 /*
			  * Can GIC handle b2b writes to wedge register?
			  */
#if gic_wedgeb2bok == 0
/* Serialises WEDGE accesses when back-to-back writes are unsafe. */
static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
#endif
/*
 * gic_send_ipi - raise inter-processor interrupt @intr through the GIC.
 *
 * Writes the interrupt number with bit 31 ("set") into the shared WEDGE
 * register.  When the GIC cannot take back-to-back wedge writes
 * (gic_wedgeb2bok == 0), the write is serialised with a spinlock and a
 * dummy read of GIC_SH_CONFIG flushes the posted write before unlocking.
 */
void gic_send_ipi(unsigned int intr)
{
#if gic_wedgeb2bok == 0
	unsigned long flags;
#endif

	pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
		 read_c0_status());
	if (!gic_wedgeb2bok)
		spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
	/* Bit 31 set = assert the given interrupt. */
	GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
	if (!gic_wedgeb2bok) {
		(void) GIC_REG(SHARED, GIC_SH_CONFIG); /* flush posted write */
		spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
	}
}
  39. /* This is Malta specific and needs to be exported */
  40. static void vpe_local_setup(unsigned int numvpes)
  41. {
  42. int i;
  43. unsigned long timer_interrupt = 5, perf_interrupt = 5;
  44. unsigned int vpe_ctl;
  45. /*
  46. * Setup the default performance counter timer interrupts
  47. * for all VPEs
  48. */
  49. for (i = 0; i < numvpes; i++) {
  50. GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
  51. /* Are Interrupts locally routable? */
  52. GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
  53. if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
  54. GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
  55. GIC_MAP_TO_PIN_MSK | timer_interrupt);
  56. if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
  57. GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
  58. GIC_MAP_TO_PIN_MSK | perf_interrupt);
  59. }
  60. }
  61. unsigned int gic_get_int(void)
  62. {
  63. unsigned int i;
  64. unsigned long *pending, *intrmask, *pcpu_mask;
  65. unsigned long *pending_abs, *intrmask_abs;
  66. /* Get per-cpu bitmaps */
  67. pending = pending_regs[smp_processor_id()].pending;
  68. intrmask = intrmask_regs[smp_processor_id()].intrmask;
  69. pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
  70. pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
  71. GIC_SH_PEND_31_0_OFS);
  72. intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
  73. GIC_SH_MASK_31_0_OFS);
  74. for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
  75. GICREAD(*pending_abs, pending[i]);
  76. GICREAD(*intrmask_abs, intrmask[i]);
  77. pending_abs++;
  78. intrmask_abs++;
  79. }
  80. bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
  81. bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
  82. i = find_first_bit(pending, GIC_NUM_INTRS);
  83. pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);
  84. return i;
  85. }
  86. static unsigned int gic_irq_startup(unsigned int irq)
  87. {
  88. pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
  89. irq -= _irqbase;
  90. GIC_SET_INTR_MASK(irq, 1);
  91. return 0;
  92. }
/*
 * irq_chip .ack: mask the interrupt and, for edge-triggered sources,
 * clear the latched edge by writing the interrupt number (bit 31 clear)
 * to the WEDGE register.  The dummy GIC_SH_CONFIG read flushes the
 * posted wedge write on parts that cannot take back-to-back writes.
 */
static void gic_irq_ack(unsigned int irq)
{
#if gic_wedgeb2bok == 0
	unsigned long flags;
#endif

	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
	irq -= _irqbase;
	GIC_CLR_INTR_MASK(irq, 1);

	if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
		if (!gic_wedgeb2bok)
			spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
		/* Bit 31 clear = deassert/clear the latched interrupt. */
		GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
		if (!gic_wedgeb2bok) {
			(void) GIC_REG(SHARED, GIC_SH_CONFIG); /* flush */
			spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
		}
	}
}
  111. static void gic_mask_irq(unsigned int irq)
  112. {
  113. pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
  114. irq -= _irqbase;
  115. GIC_CLR_INTR_MASK(irq, 1);
  116. }
  117. static void gic_unmask_irq(unsigned int irq)
  118. {
  119. pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
  120. irq -= _irqbase;
  121. GIC_SET_INTR_MASK(irq, 1);
  122. }
  123. #ifdef CONFIG_SMP
  124. static DEFINE_SPINLOCK(gic_lock);
  125. static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
  126. {
  127. cpumask_t tmp = CPU_MASK_NONE;
  128. unsigned long flags;
  129. int i;
  130. pr_debug(KERN_DEBUG "%s called\n", __func__);
  131. irq -= _irqbase;
  132. cpumask_and(&tmp, cpumask, cpu_online_mask);
  133. if (cpus_empty(tmp))
  134. return -1;
  135. /* Assumption : cpumask refers to a single CPU */
  136. spin_lock_irqsave(&gic_lock, flags);
  137. for (;;) {
  138. /* Re-route this IRQ */
  139. GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
  140. /*
  141. * FIXME: assumption that _intrmap is ordered and has no holes
  142. */
  143. /* Update the intr_map */
  144. _intrmap[irq].cpunum = first_cpu(tmp);
  145. /* Update the pcpu_masks */
  146. for (i = 0; i < NR_CPUS; i++)
  147. clear_bit(irq, pcpu_masks[i].pcpu_mask);
  148. set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
  149. }
  150. cpumask_copy(irq_desc[irq].affinity, cpumask);
  151. spin_unlock_irqrestore(&gic_lock, flags);
  152. return 0;
  153. }
  154. #endif
/*
 * GIC irq_chip.  mask/unmask drive the shared mask registers; .ack
 * additionally clears the latched edge for edge-triggered sources, and
 * .eoi re-enables the line (fasteoi flow shares gic_unmask_irq).
 */
static struct irq_chip gic_irq_controller = {
	.name		= "MIPS GIC",
	.startup	= gic_irq_startup,
	.ack		= gic_irq_ack,
	.mask		= gic_mask_irq,
	.mask_ack	= gic_mask_irq,
	.unmask		= gic_unmask_irq,
	.eoi		= gic_unmask_irq,
#ifdef CONFIG_SMP
	.set_affinity	= gic_set_affinity,
#endif
};
  167. static void __init setup_intr(unsigned int intr, unsigned int cpu,
  168. unsigned int pin, unsigned int polarity, unsigned int trigtype)
  169. {
  170. /* Setup Intr to Pin mapping */
  171. if (pin & GIC_MAP_TO_NMI_MSK) {
  172. GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
  173. /* FIXME: hack to route NMI to all cpu's */
  174. for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
  175. GICWRITE(GIC_REG_ADDR(SHARED,
  176. GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
  177. 0xffffffff);
  178. }
  179. } else {
  180. GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
  181. GIC_MAP_TO_PIN_MSK | pin);
  182. /* Setup Intr to CPU mapping */
  183. GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
  184. }
  185. /* Setup Intr Polarity */
  186. GIC_SET_POLARITY(intr, polarity);
  187. /* Setup Intr Trigger Type */
  188. GIC_SET_TRIGGER(intr, trigtype);
  189. /* Init Intr Masks */
  190. GIC_SET_INTR_MASK(intr, 0);
  191. }
  192. static void __init gic_basic_init(void)
  193. {
  194. unsigned int i, cpu;
  195. /* Setup defaults */
  196. for (i = 0; i < GIC_NUM_INTRS; i++) {
  197. GIC_SET_POLARITY(i, GIC_POL_POS);
  198. GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
  199. GIC_SET_INTR_MASK(i, 0);
  200. }
  201. /* Setup specifics */
  202. for (i = 0; i < _mapsize; i++) {
  203. cpu = _intrmap[i].cpunum;
  204. if (cpu == X)
  205. continue;
  206. if (cpu == 0 && i != 0 && _intrmap[i].intrnum == 0 &&
  207. _intrmap[i].ipiflag == 0)
  208. continue;
  209. setup_intr(_intrmap[i].intrnum,
  210. _intrmap[i].cpunum,
  211. _intrmap[i].pin,
  212. _intrmap[i].polarity,
  213. _intrmap[i].trigtype);
  214. /* Initialise per-cpu Interrupt software masks */
  215. if (_intrmap[i].ipiflag)
  216. set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
  217. }
  218. vpe_local_setup(numvpes);
  219. for (i = _irqbase; i < (_irqbase + numintrs); i++)
  220. set_irq_chip(i, &gic_irq_controller);
  221. }
  222. void __init gic_init(unsigned long gic_base_addr,
  223. unsigned long gic_addrspace_size,
  224. struct gic_intr_map *intr_map, unsigned int intr_map_size,
  225. unsigned int irqbase)
  226. {
  227. unsigned int gicconfig;
  228. _gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
  229. gic_addrspace_size);
  230. _irqbase = irqbase;
  231. _intrmap = intr_map;
  232. _mapsize = intr_map_size;
  233. GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
  234. numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
  235. GIC_SH_CONFIG_NUMINTRS_SHF;
  236. numintrs = ((numintrs + 1) * 8);
  237. numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
  238. GIC_SH_CONFIG_NUMVPES_SHF;
  239. pr_debug("%s called\n", __func__);
  240. gic_basic_init();
  241. }