/* gic.c (9.8 KB) */
  1. /*
  2. * linux/arch/arm/common/gic.c
  3. *
  4. * Copyright (C) 2002 ARM Limited, All Rights Reserved.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * Interrupt architecture for the GIC:
  11. *
  12. * o There is one Interrupt Distributor, which receives interrupts
  13. * from system devices and sends them to the Interrupt Controllers.
  14. *
  15. * o There is one CPU Interface per CPU, which sends interrupts sent
  16. * by the Distributor, and interrupts generated locally, to the
  17. * associated CPU. The base address of the CPU interface is usually
  18. * aliased so that the same address points to different chips depending
  19. * on the CPU it is accessed from.
  20. *
  21. * Note that IRQs 0-31 are special - they are local to each CPU.
  22. * As such, the enable set/clear, pending set/clear and active bit
  23. * registers are banked per-cpu for these sources.
  24. */
  25. #include <linux/init.h>
  26. #include <linux/kernel.h>
  27. #include <linux/list.h>
  28. #include <linux/smp.h>
  29. #include <linux/cpumask.h>
  30. #include <linux/io.h>
  31. #include <asm/irq.h>
  32. #include <asm/mach/irq.h>
  33. #include <asm/hardware/gic.h>
/* Serialises all distributor/CPU-interface register read-modify-write
 * sequences and calls into the gic_arch_extn hooks. */
static DEFINE_SPINLOCK(irq_controller_lock);

/* Address of GIC 0 CPU interface */
void __iomem *gic_cpu_base_addr __read_mostly;

/*
 * Supported arch specific GIC irq extension.
 * Default make them NULL.
 *
 * Platforms may fill in these hooks to piggy-back extra work (e.g. wake
 * sources) on the corresponding GIC operations; each callback is invoked
 * only if non-NULL, under irq_controller_lock.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

/* Per-controller state; indexed by the gic_nr passed to gic_init(). */
static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
  53. static inline void __iomem *gic_dist_base(struct irq_data *d)
  54. {
  55. struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
  56. return gic_data->dist_base;
  57. }
  58. static inline void __iomem *gic_cpu_base(struct irq_data *d)
  59. {
  60. struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
  61. return gic_data->cpu_base;
  62. }
  63. static inline unsigned int gic_irq(struct irq_data *d)
  64. {
  65. struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
  66. return d->irq - gic_data->irq_offset;
  67. }
  68. /*
  69. * Routines to acknowledge, disable and enable interrupts
  70. */
  71. static void gic_mask_irq(struct irq_data *d)
  72. {
  73. u32 mask = 1 << (d->irq % 32);
  74. spin_lock(&irq_controller_lock);
  75. writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
  76. if (gic_arch_extn.irq_mask)
  77. gic_arch_extn.irq_mask(d);
  78. spin_unlock(&irq_controller_lock);
  79. }
  80. static void gic_unmask_irq(struct irq_data *d)
  81. {
  82. u32 mask = 1 << (d->irq % 32);
  83. spin_lock(&irq_controller_lock);
  84. if (gic_arch_extn.irq_unmask)
  85. gic_arch_extn.irq_unmask(d);
  86. writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
  87. spin_unlock(&irq_controller_lock);
  88. }
  89. static void gic_eoi_irq(struct irq_data *d)
  90. {
  91. if (gic_arch_extn.irq_eoi) {
  92. spin_lock(&irq_controller_lock);
  93. gic_arch_extn.irq_eoi(d);
  94. spin_unlock(&irq_controller_lock);
  95. }
  96. writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
  97. }
/*
 * Configure the trigger type of a distributor interrupt.
 *
 * Only IRQ_TYPE_LEVEL_HIGH and IRQ_TYPE_EDGE_RISING are accepted;
 * SGIs (IDs 0-15) have fixed configuration and return -EINVAL.
 * Returns 0 on success.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);	/* bit in 32-irq enable regs */
	u32 enableoff = (gicirq / 32) * 4;	/* byte offset of enable reg */
	u32 confmask = 0x2 << ((gicirq % 16) * 2); /* 2 config bits per irq */
	u32 confoff = (gicirq / 16) * 4;	/* 16 irqs per config reg */
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	/* Re-enable only if we disabled it above */
	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	spin_unlock(&irq_controller_lock);

	return 0;
}
  135. static int gic_retrigger(struct irq_data *d)
  136. {
  137. if (gic_arch_extn.irq_retrigger)
  138. return gic_arch_extn.irq_retrigger(d);
  139. return -ENXIO;
  140. }
  141. #ifdef CONFIG_SMP
  142. static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
  143. bool force)
  144. {
  145. void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
  146. unsigned int shift = (d->irq % 4) * 8;
  147. unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
  148. u32 val, mask, bit;
  149. if (cpu >= 8 || cpu >= nr_cpu_ids)
  150. return -EINVAL;
  151. mask = 0xff << shift;
  152. bit = 1 << (cpu + shift);
  153. spin_lock(&irq_controller_lock);
  154. val = readl_relaxed(reg) & ~mask;
  155. writel_relaxed(val | bit, reg);
  156. spin_unlock(&irq_controller_lock);
  157. return IRQ_SET_MASK_OK;
  158. }
  159. #endif
  160. #ifdef CONFIG_PM
  161. static int gic_set_wake(struct irq_data *d, unsigned int on)
  162. {
  163. int ret = -ENXIO;
  164. if (gic_arch_extn.irq_set_wake)
  165. ret = gic_arch_extn.irq_set_wake(d, on);
  166. return ret;
  167. }
  168. #else
  169. #define gic_set_wake NULL
  170. #endif
/*
 * Chained handler for a GIC cascaded behind another interrupt
 * controller: acknowledge the parent, read INTACK on the cascaded GIC
 * to find the pending interrupt, and hand it to the generic IRQ layer.
 */
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
	spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);	/* interrupt ID lives in bits [9:0] */
	if (gic_irq == 1023)		/* 1023 == spurious; nothing pending */
		goto out;

	/* Translate the GIC ID into the Linux irq number */
	cascade_irq = gic_irq + chip_data->irq_offset;
	if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

out:
	chained_irq_exit(chip, desc);
}
/*
 * irq_chip exposed to the generic IRQ layer for every GIC interrupt.
 * Used with handle_fasteoi_irq (see gic_dist_init), hence irq_eoi
 * rather than an ack/mask pair.
 */
static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};
  204. void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
  205. {
  206. if (gic_nr >= MAX_GIC_NR)
  207. BUG();
  208. if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
  209. BUG();
  210. irq_set_chained_handler(irq, gic_handle_cascade_irq);
  211. }
/*
 * One-time initialisation of the distributor: probe the number of
 * supported interrupts, give every global (SPI) interrupt a sane
 * default configuration, and register irqs [irq_start, irq_limit)
 * with the Linux IRQ subsystem.  Runs on the boot CPU only.
 */
static void __init gic_dist_init(struct gic_chip_data *gic,
	unsigned int irq_start)
{
	unsigned int gic_irqs, irq_limit, i;
	void __iomem *base = gic->dist_base;
	u32 cpumask = 1 << smp_processor_id();

	/* Replicate this CPU's target bit into all four byte lanes */
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	/* Disable the distributor while we reconfigure it */
	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;

	/*
	 * Set all global interrupts to be level triggered, active low.
	 * (16 irqs per 32-bit config register, so byte offset is i*4/16.)
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 * (4 irqs per 32-bit target register: byte offset i*4/4.)
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts. Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	/*
	 * Limit number of interrupts registered to the platform maximum
	 */
	irq_limit = gic->irq_offset + gic_irqs;
	if (WARN_ON(irq_limit > NR_IRQS))
		irq_limit = NR_IRQS;

	/*
	 * Setup the Linux IRQ subsystem.
	 */
	for (i = irq_start; i < irq_limit; i++) {
		irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
		irq_set_chip_data(i, gic);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	/* Re-enable the distributor */
	writel_relaxed(1, base + GIC_DIST_CTRL);
}
/*
 * Per-CPU initialisation: set up the banked PPI/SGI state and enable
 * this CPU's interface.  Called on the boot CPU from gic_init() and on
 * each secondary via gic_secondary_init().
 */
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic->dist_base;
	void __iomem *base = gic->cpu_base;
	int i;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	/* Accept priorities below 0xf0 and enable this CPU interface */
	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
  285. void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
  286. void __iomem *dist_base, void __iomem *cpu_base)
  287. {
  288. struct gic_chip_data *gic;
  289. BUG_ON(gic_nr >= MAX_GIC_NR);
  290. gic = &gic_data[gic_nr];
  291. gic->dist_base = dist_base;
  292. gic->cpu_base = cpu_base;
  293. gic->irq_offset = (irq_start - 1) & ~31;
  294. if (gic_nr == 0)
  295. gic_cpu_base_addr = cpu_base;
  296. gic_dist_init(gic, irq_start);
  297. gic_cpu_init(gic);
  298. }
  299. void __cpuinit gic_secondary_init(unsigned int gic_nr)
  300. {
  301. BUG_ON(gic_nr >= MAX_GIC_NR);
  302. gic_cpu_init(&gic_data[gic_nr]);
  303. }
/*
 * Enable a per-CPU (PPI) interrupt on the calling CPU.  PPI enables are
 * banked per CPU, so the unmask is done with local interrupts disabled
 * to keep it on this CPU; the irq is also marked NOPROBE.
 */
void __cpuinit gic_enable_ppi(unsigned int irq)
{
	unsigned long flags;

	local_irq_save(flags);
	irq_set_status_flags(irq, IRQ_NOPROBE);
	gic_unmask_irq(irq_get_irq_data(irq));
	local_irq_restore(flags);
}
#ifdef CONFIG_SMP
/*
 * Send a software-generated interrupt (IPI) @irq to the CPUs in @mask
 * by writing the distributor's SOFTINT register: target CPU mask goes
 * in bits [23:16] (hence the << 16), SGI number in the low bits.
 */
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	unsigned long map = *cpus_addr(*mask);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif