/*
 * arch/arm/plat-spear/shirq.c
 *
 * SPEAr platform shared irq layer source file
 *
 * Copyright (C) 2009 ST Microelectronics
 * Viresh Kumar <viresh.linux@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/err.h>
  15. #include <linux/export.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/io.h>
  18. #include <linux/irq.h>
  19. #include <linux/irqdomain.h>
  20. #include <linux/of.h>
  21. #include <linux/of_address.h>
  22. #include <linux/of_irq.h>
  23. #include <linux/spinlock.h>
  24. #include <plat/shirq.h>
/* Serializes read-modify-write of the shared enable register. */
static DEFINE_SPINLOCK(lock);

/* spear300 shared irq registers offsets and masks */
#define SPEAR300_INT_ENB_MASK_REG	0x54
#define SPEAR300_INT_STS_MASK_REG	0x58

/*
 * SPEAr300 RAS block: 9 interrupts starting at bit 0 of the status
 * register.  clear_reg == -1 means this block has no clear register,
 * so shirq_handler skips the explicit clear step for it.
 */
static struct spear_shirq spear300_shirq_ras1 = {
	.irq_nr = 9,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = SPEAR300_INT_ENB_MASK_REG,
		.status_reg = SPEAR300_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq *spear300_shirq_blocks[] = {
	&spear300_shirq_ras1,
};
/* spear310 shared irq registers offsets and masks */
#define SPEAR310_INT_STS_MASK_REG 0x04

/*
 * SPEAr310 blocks all demux from one shared status register and have
 * neither an enable nor a clear register (enb_reg/clear_reg == -1),
 * so mask/unmask and the clear step in shirq_handler are no-ops for
 * them.  irq_bit_off positions each block's slice within the status
 * register: bits 0..7, 8..12, 13, 14..16 respectively.
 */
static struct spear_shirq spear310_shirq_ras1 = {
	.irq_nr = 8,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_ras2 = {
	.irq_nr = 5,
	.irq_bit_off = 8,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_ras3 = {
	.irq_nr = 1,
	.irq_bit_off = 13,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_intrcomm_ras = {
	.irq_nr = 3,
	.irq_bit_off = 14,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq *spear310_shirq_blocks[] = {
	&spear310_shirq_ras1,
	&spear310_shirq_ras2,
	&spear310_shirq_ras3,
	&spear310_shirq_intrcomm_ras,
};
/* spear320 shared irq registers offsets and masks */
#define SPEAR320_INT_STS_MASK_REG 0x04
#define SPEAR320_INT_CLR_MASK_REG 0x04	/* clear shares the status offset */
#define SPEAR320_INT_ENB_MASK_REG 0x08

/*
 * SPEAr320 blocks: reset_to_clear == 1 makes shirq_handler clear a
 * source by writing its bit as 0 in the clear register (instead of 1).
 * NOTE(review): the array below lists ras3 first — shirq_init assigns
 * hwirq numbers in array order, so this ordering defines the linear
 * irq layout; do not reorder casually.
 */
static struct spear_shirq spear320_shirq_ras1 = {
	.irq_nr = 3,
	.irq_bit_off = 7,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_ras2 = {
	.irq_nr = 1,
	.irq_bit_off = 10,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

/*
 * invalid_irq == 1: spear_shirq_register() skips this block entirely,
 * though its irq_nr still counts toward the descriptor allocation in
 * shirq_init().
 */
static struct spear_shirq spear320_shirq_ras3 = {
	.irq_nr = 3,
	.irq_bit_off = 0,
	.invalid_irq = 1,
	.regs = {
		.enb_reg = SPEAR320_INT_ENB_MASK_REG,
		.reset_to_enb = 1,	/* enable register has inverted polarity */
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_intrcomm_ras = {
	.irq_nr = 11,
	.irq_bit_off = 11,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq *spear320_shirq_blocks[] = {
	&spear320_shirq_ras3,
	&spear320_shirq_ras1,
	&spear320_shirq_ras2,
	&spear320_shirq_intrcomm_ras,
};
  137. static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
  138. {
  139. struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
  140. u32 val, offset = d->irq - shirq->irq_base;
  141. unsigned long flags;
  142. if (shirq->regs.enb_reg == -1)
  143. return;
  144. spin_lock_irqsave(&lock, flags);
  145. val = readl(shirq->base + shirq->regs.enb_reg);
  146. if (mask ^ shirq->regs.reset_to_enb)
  147. val &= ~(0x1 << shirq->irq_bit_off << offset);
  148. else
  149. val |= 0x1 << shirq->irq_bit_off << offset;
  150. writel(val, shirq->base + shirq->regs.enb_reg);
  151. spin_unlock_irqrestore(&lock, flags);
  152. }
  153. static void shirq_irq_mask(struct irq_data *d)
  154. {
  155. shirq_irq_mask_unmask(d, 1);
  156. }
  157. static void shirq_irq_unmask(struct irq_data *d)
  158. {
  159. shirq_irq_mask_unmask(d, 0);
  160. }
/*
 * irq_chip for the demultiplexed interrupts.  ack is implemented as
 * mask; the actual status-bit clearing happens in shirq_handler for
 * blocks that have a clear register.
 */
static struct irq_chip shirq_chip = {
	.name = "spear-shirq",
	.irq_ack = shirq_irq_mask,
	.irq_mask = shirq_irq_mask,
	.irq_unmask = shirq_irq_unmask,
};
/*
 * Chained handler for the parent (shared) interrupt.  Masks/acks the
 * parent, then loops: re-reads the block's status register and calls
 * generic_handle_irq() for every pending source in this block's bit
 * range, clearing each source afterwards when the block has a clear
 * register.  Finally unmasks the parent.
 */
static void shirq_handler(unsigned irq, struct irq_desc *desc)
{
	u32 i, j, val, mask, tmp;
	struct irq_chip *chip;
	struct spear_shirq *shirq = irq_get_handler_data(irq);

	chip = irq_get_chip(irq);
	chip->irq_ack(&desc->irq_data);

	/* Bits of the shared status register belonging to this block. */
	mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;

	/* Keep demuxing until no source in this block is pending. */
	while ((val = readl(shirq->base + shirq->regs.status_reg) &
			mask)) {
		val >>= shirq->irq_bit_off;
		for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {
			if (!(j & val))
				continue;

			generic_handle_irq(shirq->irq_base + i);

			/* clear interrupt */
			if (shirq->regs.clear_reg == -1)
				continue;

			tmp = readl(shirq->base + shirq->regs.clear_reg);
			/* reset_to_clear: clear by writing the bit as 0 */
			if (shirq->regs.reset_to_clear)
				tmp &= ~(j << shirq->irq_bit_off);
			else
				tmp |= (j << shirq->irq_bit_off);
			writel(tmp, shirq->base + shirq->regs.clear_reg);
		}
	}
	chip->irq_unmask(&desc->irq_data);
}
  195. static void __init spear_shirq_register(struct spear_shirq *shirq)
  196. {
  197. int i;
  198. if (shirq->invalid_irq)
  199. return;
  200. irq_set_chained_handler(shirq->irq, shirq_handler);
  201. for (i = 0; i < shirq->irq_nr; i++) {
  202. irq_set_chip_and_handler(shirq->irq_base + i,
  203. &shirq_chip, handle_simple_irq);
  204. set_irq_flags(shirq->irq_base + i, IRQF_VALID);
  205. irq_set_chip_data(shirq->irq_base + i, shirq);
  206. }
  207. irq_set_handler_data(shirq->irq, shirq);
  208. }
  209. static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
  210. struct device_node *np)
  211. {
  212. int i, irq_base, hwirq = 0, irq_nr = 0;
  213. static struct irq_domain *shirq_domain;
  214. void __iomem *base;
  215. base = of_iomap(np, 0);
  216. if (!base) {
  217. pr_err("%s: failed to map shirq registers\n", __func__);
  218. return -ENXIO;
  219. }
  220. for (i = 0; i < block_nr; i++)
  221. irq_nr += shirq_blocks[i]->irq_nr;
  222. irq_base = irq_alloc_descs(-1, 0, irq_nr, 0);
  223. if (IS_ERR_VALUE(irq_base)) {
  224. pr_err("%s: irq desc alloc failed\n", __func__);
  225. goto err_unmap;
  226. }
  227. shirq_domain = irq_domain_add_legacy(np, irq_nr, irq_base, 0,
  228. &irq_domain_simple_ops, NULL);
  229. if (WARN_ON(!shirq_domain)) {
  230. pr_warn("%s: irq domain init failed\n", __func__);
  231. goto err_free_desc;
  232. }
  233. for (i = 0; i < block_nr; i++) {
  234. shirq_blocks[i]->base = base;
  235. shirq_blocks[i]->irq_base = irq_find_mapping(shirq_domain,
  236. hwirq);
  237. shirq_blocks[i]->irq = irq_of_parse_and_map(np, i);
  238. spear_shirq_register(shirq_blocks[i]);
  239. hwirq += shirq_blocks[i]->irq_nr;
  240. }
  241. return 0;
  242. err_free_desc:
  243. irq_free_descs(irq_base, irq_nr);
  244. err_unmap:
  245. iounmap(base);
  246. return -ENXIO;
  247. }
  248. int __init spear300_shirq_of_init(struct device_node *np,
  249. struct device_node *parent)
  250. {
  251. return shirq_init(spear300_shirq_blocks,
  252. ARRAY_SIZE(spear300_shirq_blocks), np);
  253. }
  254. int __init spear310_shirq_of_init(struct device_node *np,
  255. struct device_node *parent)
  256. {
  257. return shirq_init(spear310_shirq_blocks,
  258. ARRAY_SIZE(spear310_shirq_blocks), np);
  259. }
  260. int __init spear320_shirq_of_init(struct device_node *np,
  261. struct device_node *parent)
  262. {
  263. return shirq_init(spear320_shirq_blocks,
  264. ARRAY_SIZE(spear320_shirq_blocks), np);
  265. }