/*
 * SPEAr platform shared irq layer source file
 *
 * Copyright (C) 2009-2012 ST Microelectronics
 * Viresh Kumar <viresh.linux@gmail.com>
 *
 * Copyright (C) 2012 ST Microelectronics
 * Shiraz Hashim <shiraz.hashim@st.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/spear-shirq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "irqchip.h"

static DEFINE_SPINLOCK(lock);
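
/*
 * Each spear_shirq block below describes a group of interrupts that are
 * multiplexed onto one parent interrupt: irq_nr sources starting at bit
 * irq_bit_off of the shared status/enable/clear registers. A register
 * offset of -1 means the block has no such register.
 */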
/* spear300 shared irq registers offsets and masks */
#define SPEAR300_INT_ENB_MASK_REG	0x54
#define SPEAR300_INT_STS_MASK_REG	0x58

static struct spear_shirq spear300_shirq_ras1 = {
	.irq_nr = 9,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = SPEAR300_INT_ENB_MASK_REG,
		.status_reg = SPEAR300_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq *spear300_shirq_blocks[] = {
	&spear300_shirq_ras1,
};
/* spear310 shared irq registers offsets and masks */
#define SPEAR310_INT_STS_MASK_REG	0x04

static struct spear_shirq spear310_shirq_ras1 = {
	.irq_nr = 8,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_ras2 = {
	.irq_nr = 5,
	.irq_bit_off = 8,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_ras3 = {
	.irq_nr = 1,
	.irq_bit_off = 13,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_intrcomm_ras = {
	.irq_nr = 3,
	.irq_bit_off = 14,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq *spear310_shirq_blocks[] = {
	&spear310_shirq_ras1,
	&spear310_shirq_ras2,
	&spear310_shirq_ras3,
	&spear310_shirq_intrcomm_ras,
};
/* spear320 shared irq registers offsets and masks */
#define SPEAR320_INT_STS_MASK_REG	0x04
#define SPEAR320_INT_CLR_MASK_REG	0x04
#define SPEAR320_INT_ENB_MASK_REG	0x08

static struct spear_shirq spear320_shirq_ras1 = {
	.irq_nr = 3,
	.irq_bit_off = 7,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_ras2 = {
	.irq_nr = 1,
	.irq_bit_off = 10,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_ras3 = {
	.irq_nr = 3,
	.irq_bit_off = 0,
	.invalid_irq = 1,
	.regs = {
		.enb_reg = SPEAR320_INT_ENB_MASK_REG,
		.reset_to_enb = 1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_intrcomm_ras = {
	.irq_nr = 11,
	.irq_bit_off = 11,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq *spear320_shirq_blocks[] = {
	&spear320_shirq_ras3,
	&spear320_shirq_ras1,
	&spear320_shirq_ras2,
	&spear320_shirq_intrcomm_ras,
};
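
/*
 * Mask or unmask one demultiplexed interrupt by updating its bit in the
 * block's enable register. The bit position is irq_bit_off plus the offset
 * of the irq within the block; reset_to_enb flips the polarity for blocks
 * whose enable bits are active-low. Blocks without an enable register
 * (enb_reg == -1) cannot be masked individually and are left untouched.
 */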
static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
{
	struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
	u32 val, offset = d->irq - shirq->irq_base;
	unsigned long flags;

	if (shirq->regs.enb_reg == -1)
		return;

	spin_lock_irqsave(&lock, flags);
	val = readl(shirq->base + shirq->regs.enb_reg);

	if (mask ^ shirq->regs.reset_to_enb)
		val &= ~(0x1 << shirq->irq_bit_off << offset);
	else
		val |= 0x1 << shirq->irq_bit_off << offset;

	writel(val, shirq->base + shirq->regs.enb_reg);
	spin_unlock_irqrestore(&lock, flags);
}
static void shirq_irq_mask(struct irq_data *d)
{
	shirq_irq_mask_unmask(d, 1);
}

static void shirq_irq_unmask(struct irq_data *d)
{
	shirq_irq_mask_unmask(d, 0);
}

static struct irq_chip shirq_chip = {
	.name		= "spear-shirq",
	.irq_ack	= shirq_irq_mask,
	.irq_mask	= shirq_irq_mask,
	.irq_unmask	= shirq_irq_unmask,
};
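
/*
 * Chained handler for the parent interrupt: ack the parent, then loop over
 * the block's status bits and call generic_handle_irq() for every pending
 * demultiplexed interrupt. If the block has a clear register, the source is
 * cleared afterwards (reset_to_clear selects whether a bit is cleared by
 * writing 0 or 1). Finally the parent interrupt is unmasked again.
 */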
static void shirq_handler(unsigned irq, struct irq_desc *desc)
{
	u32 i, j, val, mask, tmp;
	struct irq_chip *chip;
	struct spear_shirq *shirq = irq_get_handler_data(irq);

	chip = irq_get_chip(irq);
	chip->irq_ack(&desc->irq_data);

	mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;
	while ((val = readl(shirq->base + shirq->regs.status_reg) & mask)) {

		val >>= shirq->irq_bit_off;
		for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {

			if (!(j & val))
				continue;

			generic_handle_irq(shirq->irq_base + i);

			/* clear interrupt */
			if (shirq->regs.clear_reg == -1)
				continue;

			tmp = readl(shirq->base + shirq->regs.clear_reg);
			if (shirq->regs.reset_to_clear)
				tmp &= ~(j << shirq->irq_bit_off);
			else
				tmp |= (j << shirq->irq_bit_off);
			writel(tmp, shirq->base + shirq->regs.clear_reg);
		}
	}
	chip->irq_unmask(&desc->irq_data);
}
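
/*
 * Register one shared irq block: install shirq_handler as the chained
 * handler for the block's parent interrupt and set up chip, flow handler
 * and chip data for every demultiplexed interrupt in the block.
 */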
static void __init spear_shirq_register(struct spear_shirq *shirq)
{
	int i;

	if (shirq->invalid_irq)
		return;

	irq_set_chained_handler(shirq->irq, shirq_handler);
	for (i = 0; i < shirq->irq_nr; i++) {
		irq_set_chip_and_handler(shirq->irq_base + i,
					 &shirq_chip, handle_simple_irq);
		set_irq_flags(shirq->irq_base + i, IRQF_VALID);
		irq_set_chip_data(shirq->irq_base + i, shirq);
	}

	irq_set_handler_data(shirq->irq, shirq);
}
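
/*
 * Common init: map the shared register block, allocate one linux irq
 * descriptor per demultiplexed source, cover them with a legacy irq
 * domain, and register each block with its parent interrupt taken from
 * the device tree node.
 */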
static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
		struct device_node *np)
{
	int i, irq_base, hwirq = 0, irq_nr = 0;
	static struct irq_domain *shirq_domain;
	void __iomem *base;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%s: failed to map shirq registers\n", __func__);
		return -ENXIO;
	}

	for (i = 0; i < block_nr; i++)
		irq_nr += shirq_blocks[i]->irq_nr;

	irq_base = irq_alloc_descs(-1, 0, irq_nr, 0);
	if (IS_ERR_VALUE(irq_base)) {
		pr_err("%s: irq desc alloc failed\n", __func__);
		goto err_unmap;
	}

	shirq_domain = irq_domain_add_legacy(np, irq_nr, irq_base, 0,
			&irq_domain_simple_ops, NULL);
	if (WARN_ON(!shirq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		goto err_free_desc;
	}

	for (i = 0; i < block_nr; i++) {
		shirq_blocks[i]->base = base;
		shirq_blocks[i]->irq_base = irq_find_mapping(shirq_domain,
				hwirq);
		shirq_blocks[i]->irq = irq_of_parse_and_map(np, i);

		spear_shirq_register(shirq_blocks[i]);
		hwirq += shirq_blocks[i]->irq_nr;
	}

	return 0;

err_free_desc:
	irq_free_descs(irq_base, irq_nr);
err_unmap:
	iounmap(base);
	return -ENXIO;
}
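
/*
 * Device tree entry points. A matching node provides the shared register
 * block via 'reg' and one parent interrupt per shirq block via
 * 'interrupts' (in the order the blocks appear in the *_shirq_blocks
 * arrays above). A minimal sketch of what such a node might look like for
 * spear320; the address and parent interrupt numbers are illustrative
 * only, not taken from this file:
 *
 *	shirq: interrupt-controller@b3000000 {
 *		compatible = "st,spear320-shirq";
 *		reg = <0xb3000000 0x1000>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupts = <28 29 30 1>;
 *	};
 */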
int __init spear300_shirq_of_init(struct device_node *np,
		struct device_node *parent)
{
	return shirq_init(spear300_shirq_blocks,
			ARRAY_SIZE(spear300_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init);

int __init spear310_shirq_of_init(struct device_node *np,
		struct device_node *parent)
{
	return shirq_init(spear310_shirq_blocks,
			ARRAY_SIZE(spear310_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init);

int __init spear320_shirq_of_init(struct device_node *np,
		struct device_node *parent)
{
	return shirq_init(spear320_shirq_blocks,
			ARRAY_SIZE(spear320_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear320_shirq, "st,spear320-shirq", spear320_shirq_of_init);