regmap-irq.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479
  1. /*
  2. * regmap based irq_chip
  3. *
  4. * Copyright 2011 Wolfson Microelectronics plc
  5. *
  6. * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/export.h>
  13. #include <linux/device.h>
  14. #include <linux/regmap.h>
  15. #include <linux/irq.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/irqdomain.h>
  18. #include <linux/pm_runtime.h>
  19. #include <linux/slab.h>
  20. #include "internal.h"
/*
 * Runtime state for one registered regmap IRQ controller instance,
 * allocated by regmap_add_irq_chip().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises mask/wake updates between bus_lock and sync_unlock */
	struct irq_chip irq_chip;	/* per-instance copy of the regmap_irq_chip template */

	struct regmap *map;		/* register map used for all I/O */
	const struct regmap_irq_chip *chip; /* static description of the chip's IRQ registers */

	int irq_base;			/* first descriptor for legacy domains, else the irq_alloc result */
	struct irq_domain *domain;	/* legacy or linear domain for the sub-IRQs */

	int irq;			/* primary (parent) interrupt line */
	int wake_count;			/* pending wake enable delta to propagate to the parent IRQ */

	unsigned int *status_buf;	/* one cached word per status register */
	unsigned int *mask_buf;		/* current mask; bit set = masked */
	unsigned int *mask_buf_def;	/* union of all irq masks per register */
	unsigned int *wake_buf;		/* wake mask; only allocated when chip->wake_base is set */

	unsigned int irq_reg_stride;	/* register index stride, defaults to 1 */
};
  36. static inline const
  37. struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
  38. int irq)
  39. {
  40. return &data->chip->irqs[irq];
  41. }
  42. static void regmap_irq_lock(struct irq_data *data)
  43. {
  44. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  45. mutex_lock(&d->lock);
  46. }
  47. static void regmap_irq_sync_unlock(struct irq_data *data)
  48. {
  49. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  50. struct regmap *map = d->map;
  51. int i, ret;
  52. u32 reg;
  53. if (d->chip->runtime_pm) {
  54. ret = pm_runtime_get_sync(map->dev);
  55. if (ret < 0)
  56. dev_err(map->dev, "IRQ sync failed to resume: %d\n",
  57. ret);
  58. }
  59. /*
  60. * If there's been a change in the mask write it back to the
  61. * hardware. We rely on the use of the regmap core cache to
  62. * suppress pointless writes.
  63. */
  64. for (i = 0; i < d->chip->num_regs; i++) {
  65. reg = d->chip->mask_base +
  66. (i * map->reg_stride * d->irq_reg_stride);
  67. if (d->chip->mask_invert)
  68. ret = regmap_update_bits(d->map, reg,
  69. d->mask_buf_def[i], ~d->mask_buf[i]);
  70. else
  71. ret = regmap_update_bits(d->map, reg,
  72. d->mask_buf_def[i], d->mask_buf[i]);
  73. if (ret != 0)
  74. dev_err(d->map->dev, "Failed to sync masks in %x\n",
  75. reg);
  76. }
  77. if (d->chip->runtime_pm)
  78. pm_runtime_put(map->dev);
  79. /* If we've changed our wakeup count propagate it to the parent */
  80. if (d->wake_count < 0)
  81. for (i = d->wake_count; i < 0; i++)
  82. irq_set_irq_wake(d->irq, 0);
  83. else if (d->wake_count > 0)
  84. for (i = 0; i < d->wake_count; i++)
  85. irq_set_irq_wake(d->irq, 1);
  86. d->wake_count = 0;
  87. mutex_unlock(&d->lock);
  88. }
  89. static void regmap_irq_enable(struct irq_data *data)
  90. {
  91. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  92. struct regmap *map = d->map;
  93. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  94. d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
  95. }
  96. static void regmap_irq_disable(struct irq_data *data)
  97. {
  98. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  99. struct regmap *map = d->map;
  100. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  101. d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
  102. }
  103. static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
  104. {
  105. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  106. struct regmap *map = d->map;
  107. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  108. if (!d->chip->wake_base)
  109. return -EINVAL;
  110. if (on) {
  111. d->wake_buf[irq_data->reg_offset / map->reg_stride]
  112. &= ~irq_data->mask;
  113. d->wake_count++;
  114. } else {
  115. d->wake_buf[irq_data->reg_offset / map->reg_stride]
  116. |= irq_data->mask;
  117. d->wake_count--;
  118. }
  119. return 0;
  120. }
/*
 * Template irq_chip; regmap_add_irq_chip() takes a per-instance copy
 * so the name (and the wake callback) can be customised per chip.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock = regmap_irq_lock,
	.irq_bus_sync_unlock = regmap_irq_sync_unlock,
	.irq_disable = regmap_irq_disable,
	.irq_enable = regmap_irq_enable,
	.irq_set_wake = regmap_irq_set_wake,
};
/*
 * Threaded handler for the chip's primary interrupt: read all status
 * registers, mask and acknowledge pending bits, then dispatch the
 * nested virtual IRQ for every status bit that fired.
 */
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			return IRQ_NONE;
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		ret = regmap_read(map, chip->status_base + (i * map->reg_stride
				  * data->irq_reg_stride),
				  &data->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			if (chip->runtime_pm)
				pm_runtime_put(map->dev);
			return IRQ_NONE;
		}

		/* Masked interrupts are never reported as pending */
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && chip->ack_base) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Second pass: fire the nested handler for every pending sub-IRQ */
	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
/*
 * irq_domain map callback: wire a freshly created virtual IRQ up to
 * this chip's data and per-instance irq_chip.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	/* Handlers run from the primary IRQ's thread, not hard IRQ context */
	irq_set_nested_thread(virq, 1);

	/* ARM needs us to explicitly flag the IRQ as valid
	 * and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	irq_set_noprobe(virq);
#endif

	return 0;
}
  202. static struct irq_domain_ops regmap_domain_ops = {
  203. .map = regmap_irq_map,
  204. .xlate = irq_domain_xlate_twocell,
  205. };
/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate at specific IRQ number if irq_base > 0.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;

	/* Every described IRQ must sit inside a valid, stride-aligned register */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	/* Legacy mode: the caller asked for a fixed Linux IRQ number range */
	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	*data = d;

	/* One cached word per status/mask register */
	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	/* Wake support is optional; only allocate when the chip has it */
	if (chip->wake_base) {
		d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	/* Per-instance copy of the template so the name can be customised */
	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	if (!chip->wake_base) {
		/* No wake registers: mask on suspend instead of set_wake */
		d->irq_chip.irq_set_wake = NULL;
		d->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND |
				     IRQCHIP_SKIP_SET_WAKE;
	}
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	mutex_init(&d->lock);

	/* Collect the union of all per-IRQ masks for each register */
	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_update_bits(map, reg, d->wake_buf[i],
						 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
		goto err_domain;
	}

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
  348. /**
  349. * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
  350. *
  351. * @irq: Primary IRQ for the device
  352. * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
  353. */
  354. void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
  355. {
  356. if (!d)
  357. return;
  358. free_irq(irq, d);
  359. /* We should unmap the domain but... */
  360. kfree(d->wake_buf);
  361. kfree(d->mask_buf_def);
  362. kfree(d->mask_buf);
  363. kfree(d->status_buf);
  364. kfree(d);
  365. }
  366. EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
  367. /**
  368. * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
  369. *
  370. * Useful for drivers to request their own IRQs.
  371. *
  372. * @data: regmap_irq controller to operate on.
  373. */
  374. int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
  375. {
  376. WARN_ON(!data->irq_base);
  377. return data->irq_base;
  378. }
  379. EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
  380. /**
  381. * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
  382. *
  383. * Useful for drivers to request their own IRQs.
  384. *
  385. * @data: regmap_irq controller to operate on.
  386. * @irq: index of the interrupt requested in the chip IRQs
  387. */
  388. int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
  389. {
  390. /* Handle holes in the IRQ list */
  391. if (!data->chip->irqs[irq].mask)
  392. return -EINVAL;
  393. return irq_create_mapping(data->domain, irq);
  394. }
  395. EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
  396. /**
  397. * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
  398. *
  399. * Useful for drivers to request their own IRQs and for integration
  400. * with subsystems. For ease of integration NULL is accepted as a
  401. * domain, allowing devices to just call this even if no domain is
  402. * allocated.
  403. *
  404. * @data: regmap_irq controller to operate on.
  405. */
  406. struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
  407. {
  408. if (data)
  409. return data->domain;
  410. else
  411. return NULL;
  412. }
  413. EXPORT_SYMBOL_GPL(regmap_irq_get_domain);