/* drivers/irqchip/spear-shirq.c */
  1. /*
  2. * SPEAr platform shared irq layer source file
  3. *
  4. * Copyright (C) 2009-2012 ST Microelectronics
  5. * Viresh Kumar <viresh.linux@gmail.com>
  6. *
  7. * Copyright (C) 2012 ST Microelectronics
  8. * Shiraz Hashim <shiraz.hashim@st.com>
  9. *
  10. * This file is licensed under the terms of the GNU General Public
  11. * License version 2. This program is licensed "as is" without any
  12. * warranty of any kind, whether express or implied.
  13. */
  14. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15. #include <linux/err.h>
  16. #include <linux/export.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/io.h>
  19. #include <linux/irq.h>
  20. #include <linux/irqdomain.h>
  21. #include <linux/irqchip/spear-shirq.h>
  22. #include <linux/of.h>
  23. #include <linux/of_address.h>
  24. #include <linux/of_irq.h>
  25. #include <linux/spinlock.h>
/* serialises read-modify-write access to the shared enable registers */
static DEFINE_SPINLOCK(lock);

/* spear300 shared irq registers offsets and masks */
#define SPEAR300_INT_ENB_MASK_REG	0x54
#define SPEAR300_INT_STS_MASK_REG	0x58

/*
 * spear300 RAS1 block: 9 shared irqs starting at bit 0 of the status
 * register.  A register offset of -1 means "this block has no such
 * register" (here: no clear register).
 */
static struct spear_shirq spear300_shirq_ras1 = {
	.irq_nr = 9,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = SPEAR300_INT_ENB_MASK_REG,
		.status_reg = SPEAR300_INT_STS_MASK_REG,
		.clear_reg = -1,	/* no clear register on spear300 */
	},
};

/* all shared-irq blocks handled on spear300 */
static struct spear_shirq *spear300_shirq_blocks[] = {
	&spear300_shirq_ras1,
};
/* spear310 shared irq registers offsets and masks */
#define SPEAR310_INT_STS_MASK_REG	0x04

/*
 * spear310 blocks: all four share a single status register and have
 * neither an enable nor a clear register (-1 == not present), so they
 * are demultiplexed purely by bit position within the status word.
 */

/* RAS1: 8 irqs at status bits 0..7 */
static struct spear_shirq spear310_shirq_ras1 = {
	.irq_nr = 8,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

/* RAS2: 5 irqs at status bits 8..12 */
static struct spear_shirq spear310_shirq_ras2 = {
	.irq_nr = 5,
	.irq_bit_off = 8,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

/* RAS3: single irq at status bit 13 */
static struct spear_shirq spear310_shirq_ras3 = {
	.irq_nr = 1,
	.irq_bit_off = 13,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

/* intrcomm RAS: 3 irqs at status bits 14..16 */
static struct spear_shirq spear310_shirq_intrcomm_ras = {
	.irq_nr = 3,
	.irq_bit_off = 14,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

/* all shared-irq blocks handled on spear310 */
static struct spear_shirq *spear310_shirq_blocks[] = {
	&spear310_shirq_ras1,
	&spear310_shirq_ras2,
	&spear310_shirq_ras3,
	&spear310_shirq_intrcomm_ras,
};
/* spear320 shared irq registers offsets and masks */
#define SPEAR320_INT_STS_MASK_REG	0x04
#define SPEAR320_INT_CLR_MASK_REG	0x04	/* clear shares the status offset */
#define SPEAR320_INT_ENB_MASK_REG	0x08

/*
 * spear320 blocks: interrupts are cleared (and, where present, enabled)
 * by writing 0 to the corresponding bit, hence reset_to_clear /
 * reset_to_enb are set.
 */

/* RAS1: 3 irqs at status bits 7..9 */
static struct spear_shirq spear320_shirq_ras1 = {
	.irq_nr = 3,
	.irq_bit_off = 7,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

/* RAS2: single irq at status bit 10 */
static struct spear_shirq spear320_shirq_ras2 = {
	.irq_nr = 1,
	.irq_bit_off = 10,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

/*
 * RAS3: 3 irqs at status bits 0..2.  Marked invalid_irq, so it is
 * skipped by spear_shirq_register() but still consumes hwirq space in
 * shirq_init()'s bookkeeping.
 */
static struct spear_shirq spear320_shirq_ras3 = {
	.irq_nr = 3,
	.irq_bit_off = 0,
	.invalid_irq = 1,
	.regs = {
		.enb_reg = SPEAR320_INT_ENB_MASK_REG,
		.reset_to_enb = 1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

/* intrcomm RAS: 11 irqs at status bits 11..21 */
static struct spear_shirq spear320_shirq_intrcomm_ras = {
	.irq_nr = 11,
	.irq_bit_off = 11,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

/*
 * All shared-irq blocks handled on spear320.  Note: ras3 is deliberately
 * first; the order here fixes the hwirq numbering assigned in shirq_init().
 */
static struct spear_shirq *spear320_shirq_blocks[] = {
	&spear320_shirq_ras3,
	&spear320_shirq_ras1,
	&spear320_shirq_ras2,
	&spear320_shirq_intrcomm_ras,
};
  138. static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
  139. {
  140. struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
  141. u32 val, offset = d->irq - shirq->irq_base;
  142. unsigned long flags;
  143. if (shirq->regs.enb_reg == -1)
  144. return;
  145. spin_lock_irqsave(&lock, flags);
  146. val = readl(shirq->base + shirq->regs.enb_reg);
  147. if (mask ^ shirq->regs.reset_to_enb)
  148. val &= ~(0x1 << shirq->irq_bit_off << offset);
  149. else
  150. val |= 0x1 << shirq->irq_bit_off << offset;
  151. writel(val, shirq->base + shirq->regs.enb_reg);
  152. spin_unlock_irqrestore(&lock, flags);
  153. }
  154. static void shirq_irq_mask(struct irq_data *d)
  155. {
  156. shirq_irq_mask_unmask(d, 1);
  157. }
  158. static void shirq_irq_unmask(struct irq_data *d)
  159. {
  160. shirq_irq_mask_unmask(d, 0);
  161. }
/*
 * irq_chip for the demultiplexed shared irqs.  The hardware has no
 * dedicated ack register, so .irq_ack is implemented as mask.
 */
static struct irq_chip shirq_chip = {
	.name		= "spear-shirq",
	.irq_ack	= shirq_irq_mask,
	.irq_mask	= shirq_irq_mask,
	.irq_unmask	= shirq_irq_unmask,
};
/*
 * Chained handler for the parent interrupt of one shared-irq block.
 * Masks the parent, loops while any of the block's status bits are set,
 * dispatching each pending bit to its mapped Linux irq and clearing it
 * in hardware, then unmasks the parent again.
 */
static void shirq_handler(unsigned irq, struct irq_desc *desc)
{
	u32 i, j, val, mask, tmp;
	struct irq_chip *chip;
	struct spear_shirq *shirq = irq_get_handler_data(irq);

	chip = irq_get_chip(irq);
	/* ack == mask for the parent chip; keeps it quiet while we demux */
	chip->irq_ack(&desc->irq_data);

	/* bits of the status register belonging to this block */
	mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;

	/* re-read status until no irq of this block is pending */
	while ((val = readl(shirq->base + shirq->regs.status_reg) &
				mask)) {
		val >>= shirq->irq_bit_off;
		for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {
			if (!(j & val))
				continue;

			generic_handle_irq(shirq->irq_base + i);

			/* clear interrupt */
			if (shirq->regs.clear_reg == -1)
				continue;

			tmp = readl(shirq->base + shirq->regs.clear_reg);
			/* reset_to_clear: write 0 to clear; else write 1 */
			if (shirq->regs.reset_to_clear)
				tmp &= ~(j << shirq->irq_bit_off);
			else
				tmp |= (j << shirq->irq_bit_off);
			writel(tmp, shirq->base + shirq->regs.clear_reg);
		}
	}
	chip->irq_unmask(&desc->irq_data);
}
  196. static void __init spear_shirq_register(struct spear_shirq *shirq)
  197. {
  198. int i;
  199. if (shirq->invalid_irq)
  200. return;
  201. irq_set_chained_handler(shirq->irq, shirq_handler);
  202. for (i = 0; i < shirq->irq_nr; i++) {
  203. irq_set_chip_and_handler(shirq->irq_base + i,
  204. &shirq_chip, handle_simple_irq);
  205. set_irq_flags(shirq->irq_base + i, IRQF_VALID);
  206. irq_set_chip_data(shirq->irq_base + i, shirq);
  207. }
  208. irq_set_handler_data(shirq->irq, shirq);
  209. }
/*
 * Common DT init for one SoC's set of shared-irq blocks.
 *
 * Maps the block's registers, allocates one Linux irq descriptor per
 * shared irq across all blocks, wraps them in a legacy irq domain, then
 * registers each block with its slice of the hwirq space (hwirq order
 * follows the order of @shirq_blocks).
 *
 * Returns 0 on success, -ENXIO on any failure (resources released via
 * the goto-cleanup chain below).
 */
static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
		struct device_node *np)
{
	int i, irq_base, hwirq = 0, irq_nr = 0;
	static struct irq_domain *shirq_domain;
	void __iomem *base;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%s: failed to map shirq registers\n", __func__);
		return -ENXIO;
	}

	/* total number of demultiplexed irqs over all blocks */
	for (i = 0; i < block_nr; i++)
		irq_nr += shirq_blocks[i]->irq_nr;

	irq_base = irq_alloc_descs(-1, 0, irq_nr, 0);
	if (IS_ERR_VALUE(irq_base)) {
		pr_err("%s: irq desc alloc failed\n", __func__);
		goto err_unmap;
	}

	shirq_domain = irq_domain_add_legacy(np, irq_nr, irq_base, 0,
			&irq_domain_simple_ops, NULL);
	if (WARN_ON(!shirq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		goto err_free_desc;
	}

	for (i = 0; i < block_nr; i++) {
		shirq_blocks[i]->base = base;
		/* first Linux irq of this block's contiguous hwirq slice */
		shirq_blocks[i]->irq_base = irq_find_mapping(shirq_domain,
				hwirq);
		/* parent (multiplexed) interrupt for this block */
		shirq_blocks[i]->irq = irq_of_parse_and_map(np, i);

		spear_shirq_register(shirq_blocks[i]);
		hwirq += shirq_blocks[i]->irq_nr;
	}

	return 0;

err_free_desc:
	irq_free_descs(irq_base, irq_nr);
err_unmap:
	iounmap(base);
	return -ENXIO;
}
  249. int __init spear300_shirq_of_init(struct device_node *np,
  250. struct device_node *parent)
  251. {
  252. return shirq_init(spear300_shirq_blocks,
  253. ARRAY_SIZE(spear300_shirq_blocks), np);
  254. }
  255. int __init spear310_shirq_of_init(struct device_node *np,
  256. struct device_node *parent)
  257. {
  258. return shirq_init(spear310_shirq_blocks,
  259. ARRAY_SIZE(spear310_shirq_blocks), np);
  260. }
  261. int __init spear320_shirq_of_init(struct device_node *np,
  262. struct device_node *parent)
  263. {
  264. return shirq_init(spear320_shirq_blocks,
  265. ARRAY_SIZE(spear320_shirq_blocks), np);
  266. }