regmap-irq.c

/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>

#include "internal.h"
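
/*
 * Runtime state for one regmap based interrupt controller: the chip
 * description, the regmap used to reach the device, the irq_domain
 * exposing its interrupts, and buffers caching the status, mask and
 * wake registers.  The cached register state is protected by @lock,
 * which is taken through the irq_bus_lock/irq_bus_sync_unlock
 * callbacks below.
 */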
struct regmap_irq_chip_data {
        struct mutex lock;
        struct irq_chip irq_chip;

        struct regmap *map;
        const struct regmap_irq_chip *chip;

        int irq_base;
        struct irq_domain *domain;

        int irq;
        int wake_count;

        unsigned int *status_buf;
        unsigned int *mask_buf;
        unsigned int *mask_buf_def;
        unsigned int *wake_buf;

        unsigned int irq_reg_stride;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
                                     int irq)
{
        return &data->chip->irqs[irq];
}
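
/*
 * irq_bus_lock callback: interrupt configuration changes are made on the
 * cached buffers under this mutex and are only written out to the device
 * in the matching irq_bus_sync_unlock callback below.
 */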
static void regmap_irq_lock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

        mutex_lock(&d->lock);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        int i, ret;
        u32 reg;

        /*
         * If there's been a change in the mask write it back to the
         * hardware.  We rely on the use of the regmap core cache to
         * suppress pointless writes.
         */
        for (i = 0; i < d->chip->num_regs; i++) {
                reg = d->chip->mask_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                ret = regmap_update_bits(d->map, reg,
                                         d->mask_buf_def[i], d->mask_buf[i]);
                if (ret != 0)
                        dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                reg);
        }

        /* If we've changed our wakeup count propagate it to the parent */
        if (d->wake_count < 0)
                for (i = d->wake_count; i < 0; i++)
                        irq_set_irq_wake(d->irq, 0);
        else if (d->wake_count > 0)
                for (i = 0; i < d->wake_count; i++)
                        irq_set_irq_wake(d->irq, 1);

        d->wake_count = 0;

        mutex_unlock(&d->lock);
}
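
/*
 * Enable an interrupt by clearing its bit in the cached mask buffer; the
 * hardware mask register is only updated when the bus lock is released
 * in regmap_irq_sync_unlock().
 */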
static void regmap_irq_enable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
}
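
/*
 * Disable an interrupt by setting its bit in the cached mask buffer; as
 * above, the write to the device is deferred to the sync_unlock callback.
 */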
static void regmap_irq_disable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}
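
/*
 * Update the cached wake mask for an interrupt and track how many wake
 * enables or disables still need to be propagated to the parent interrupt
 * when the bus lock is released.  Chips without a wake_base cannot act as
 * a wakeup source.
 */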
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        if (!d->chip->wake_base)
                return -EINVAL;

        if (on) {
                d->wake_buf[irq_data->reg_offset / map->reg_stride]
                        &= ~irq_data->mask;
                d->wake_count++;
        } else {
                d->wake_buf[irq_data->reg_offset / map->reg_stride]
                        |= irq_data->mask;
                d->wake_count--;
        }

        return 0;
}

static const struct irq_chip regmap_irq_chip = {
        .name = "regmap",
        .irq_bus_lock = regmap_irq_lock,
        .irq_bus_sync_unlock = regmap_irq_sync_unlock,
        .irq_disable = regmap_irq_disable,
        .irq_enable = regmap_irq_enable,
        .irq_set_wake = regmap_irq_set_wake,
};
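
/*
 * Threaded handler for the chip's primary interrupt: read back the status
 * registers, mask off anything that is disabled, acknowledge what needs
 * acknowledging and then dispatch the nested per-interrupt handlers via
 * the irq_domain.
 */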
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
        struct regmap_irq_chip_data *data = d;
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        int ret, i;
        bool handled = false;
        u32 reg;

        /*
         * Ignore masked IRQs and ack if we need to; we ack early so
         * there is no race between handling and acknowledging the
         * interrupt.  We assume that typically few of the interrupts
         * will fire simultaneously so don't worry about overhead from
         * doing a write per register.
         */
        for (i = 0; i < data->chip->num_regs; i++) {
                ret = regmap_read(map, chip->status_base + (i * map->reg_stride
                                  * data->irq_reg_stride),
                                  &data->status_buf[i]);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        return IRQ_NONE;
                }

                data->status_buf[i] &= ~data->mask_buf[i];

                if (data->status_buf[i] && chip->ack_base) {
                        reg = chip->ack_base +
                                (i * map->reg_stride * data->irq_reg_stride);
                        ret = regmap_write(map, reg, data->status_buf[i]);
                        if (ret != 0)
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        for (i = 0; i < chip->num_irqs; i++) {
                if (data->status_buf[chip->irqs[i].reg_offset /
                                     map->reg_stride] & chip->irqs[i].mask) {
                        handle_nested_irq(irq_find_mapping(data->domain, i));
                        handled = true;
                }
        }

        if (handled)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}
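
/*
 * irq_domain map callback: wire each hardware interrupt number up to this
 * controller's copy of the irq_chip and mark it as a nested threaded
 * interrupt so the per-interrupt handlers are run from the thread above.
 */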
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        struct regmap_irq_chip_data *data = h->host_data;

        irq_set_chip_data(virq, data);
        irq_set_chip_and_handler(virq, &data->irq_chip, handle_edge_irq);
        irq_set_nested_thread(virq, 1);

        /* ARM needs us to explicitly flag the IRQ as valid
         * and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
        set_irq_flags(virq, IRQF_VALID);
#else
        irq_set_noprobe(virq);
#endif

        return 0;
}

static struct irq_domain_ops regmap_domain_ops = {
        .map = regmap_irq_map,
        .xlate = irq_domain_xlate_twocell,
};

/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate the Linux IRQ range at this base if non-zero,
 *             otherwise a linear irq_domain mapping is used.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                        int irq_base, const struct regmap_irq_chip *chip,
                        struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data *d;
        int i;
        int ret = -ENOMEM;
        u32 reg;

        for (i = 0; i < chip->num_irqs; i++) {
                if (chip->irqs[i].reg_offset % map->reg_stride)
                        return -EINVAL;
                if (chip->irqs[i].reg_offset / map->reg_stride >=
                    chip->num_regs)
                        return -EINVAL;
        }

        if (irq_base) {
                irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
                if (irq_base < 0) {
                        dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
                                 irq_base);
                        return irq_base;
                }
        }

        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        *data = d;

        d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
                                GFP_KERNEL);
        if (!d->status_buf)
                goto err_alloc;

        d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
                              GFP_KERNEL);
        if (!d->mask_buf)
                goto err_alloc;

        d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
                                  GFP_KERNEL);
        if (!d->mask_buf_def)
                goto err_alloc;

        if (chip->wake_base) {
                d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
                                      GFP_KERNEL);
                if (!d->wake_buf)
                        goto err_alloc;
        }

        d->irq_chip = regmap_irq_chip;
        d->irq = irq;
        d->map = map;
        d->chip = chip;
        d->irq_base = irq_base;

        if (chip->irq_reg_stride)
                d->irq_reg_stride = chip->irq_reg_stride;
        else
                d->irq_reg_stride = 1;

        mutex_init(&d->lock);

        for (i = 0; i < chip->num_irqs; i++)
                d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
                        |= chip->irqs[i].mask;

        /* Mask all the interrupts by default */
        for (i = 0; i < chip->num_regs; i++) {
                d->mask_buf[i] = d->mask_buf_def[i];
                reg = chip->mask_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                ret = regmap_update_bits(map, reg,
                                         d->mask_buf[i], d->mask_buf[i]);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                reg, ret);
                        goto err_alloc;
                }
        }

        if (irq_base)
                d->domain = irq_domain_add_legacy(map->dev->of_node,
                                                  chip->num_irqs, irq_base, 0,
                                                  &regmap_domain_ops, d);
        else
                d->domain = irq_domain_add_linear(map->dev->of_node,
                                                  chip->num_irqs,
                                                  &regmap_domain_ops, d);
        if (!d->domain) {
                dev_err(map->dev, "Failed to create IRQ domain\n");
                ret = -ENOMEM;
                goto err_alloc;
        }

        ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
                                   chip->name, d);
        if (ret != 0) {
                dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
                goto err_domain;
        }

        return 0;

err_domain:
        /* Should really dispose of the domain but... */
err_alloc:
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_buf);
        kfree(d);
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
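
/*
 * Example (illustrative only, not part of this file): a minimal sketch of
 * how a client driver might describe its interrupts and register them with
 * regmap_add_irq_chip().  The foo_* names, register addresses, bit masks
 * and flags below are hypothetical.
 *
 *	static const struct regmap_irq foo_irqs[] = {
 *		[0] = { .reg_offset = 0, .mask = BIT(0) },
 *		[1] = { .reg_offset = 0, .mask = BIT(1) },
 *	};
 *
 *	static const struct regmap_irq_chip foo_irq_chip = {
 *		.name = "foo",
 *		.status_base = 0x10,
 *		.mask_base = 0x20,
 *		.ack_base = 0x30,
 *		.num_regs = 1,
 *		.irqs = foo_irqs,
 *		.num_irqs = ARRAY_SIZE(foo_irqs),
 *	};
 *
 *	ret = regmap_add_irq_chip(foo->regmap, foo->irq, IRQF_ONESHOT,
 *				  0, &foo_irq_chip, &foo->irq_data);
 */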

/**
 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
        if (!d)
                return;

        free_irq(irq, d);
        /* We should unmap the domain but... */
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_buf);
        kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
        WARN_ON(!data->irq_base);
        return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);

/**
 * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 * @irq:  index of the interrupt requested in the chip IRQs
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
        /* Handle holes in the IRQ list */
        if (!data->chip->irqs[irq].mask)
                return -EINVAL;

        return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
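
/*
 * Example (illustrative only): a client driver requesting one of its own
 * interrupts would typically map the chip-relative index to a Linux IRQ
 * first.  FOO_IRQ_ALARM, foo->irq_data and foo_alarm_handler are
 * hypothetical names.
 *
 *	int virq = regmap_irq_get_virq(foo->irq_data, FOO_IRQ_ALARM);
 *	if (virq <= 0)
 *		return virq < 0 ? virq : -ENXIO;
 *	ret = request_threaded_irq(virq, NULL, foo_alarm_handler,
 *				   IRQF_ONESHOT, "foo-alarm", foo);
 */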