regmap-irq.c 15 KB

  1. /*
  2. * regmap based irq_chip
  3. *
  4. * Copyright 2011 Wolfson Microelectronics plc
  5. *
  6. * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/export.h>
  13. #include <linux/device.h>
  14. #include <linux/regmap.h>
  15. #include <linux/irq.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/irqdomain.h>
  18. #include <linux/pm_runtime.h>
  19. #include <linux/slab.h>
  20. #include "internal.h"
/* Runtime state for one registered regmap IRQ controller instance. */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises cached mask/wake state against sync_unlock */
	struct irq_chip irq_chip;	/* per-instance copy of the template so .name can differ */

	struct regmap *map;		/* register map used for all hardware I/O */
	const struct regmap_irq_chip *chip;	/* caller-supplied static description */

	int irq_base;			/* first descriptor when using a legacy domain, else 0 */
	struct irq_domain *domain;

	int irq;			/* primary (parent) interrupt line */
	int wake_count;			/* net wake enable/disable requests since last sync */

	void *status_reg_buf;		/* raw bulk-read buffer, val_bytes * num_regs */
	unsigned int *status_buf;	/* decoded status, one word per status register */
	unsigned int *mask_buf;		/* current mask state, written back on bus unlock */
	unsigned int *mask_buf_def;	/* union of all IRQ masks per register */
	unsigned int *wake_buf;		/* wake mask state; NULL when chip has no wake_base */

	unsigned int irq_reg_stride;	/* register step between successive IRQ registers */
};
  37. static inline const
  38. struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
  39. int irq)
  40. {
  41. return &data->chip->irqs[irq];
  42. }
  43. static void regmap_irq_lock(struct irq_data *data)
  44. {
  45. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  46. mutex_lock(&d->lock);
  47. }
/*
 * irq_bus_sync_unlock: flush the cached mask and wake state to the
 * hardware, ack any masked-but-pending interrupts, propagate wake
 * requests to the parent IRQ, then release the lock taken in
 * regmap_irq_lock().
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;

	/* The device may need to be powered up for the register writes */
	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware. We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		/* mask_invert: a set bit enables rather than masks */
		if (d->chip->mask_invert)
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			/* wake_invert: a cleared bit enables wake */
			if (d->chip->wake_invert)
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_update_bits(d->map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && d->chip->ack_base) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
  121. static void regmap_irq_enable(struct irq_data *data)
  122. {
  123. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  124. struct regmap *map = d->map;
  125. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  126. d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
  127. }
  128. static void regmap_irq_disable(struct irq_data *data)
  129. {
  130. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  131. struct regmap *map = d->map;
  132. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  133. d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
  134. }
  135. static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
  136. {
  137. struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
  138. struct regmap *map = d->map;
  139. const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
  140. if (on) {
  141. if (d->wake_buf)
  142. d->wake_buf[irq_data->reg_offset / map->reg_stride]
  143. &= ~irq_data->mask;
  144. d->wake_count++;
  145. } else {
  146. if (d->wake_buf)
  147. d->wake_buf[irq_data->reg_offset / map->reg_stride]
  148. |= irq_data->mask;
  149. d->wake_count--;
  150. }
  151. return 0;
  152. }
/*
 * Template irq_chip shared by all instances; copied into each
 * regmap_irq_chip_data so that .name can be set per chip.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_wake		= regmap_irq_set_wake,
};
  160. static irqreturn_t regmap_irq_thread(int irq, void *d)
  161. {
  162. struct regmap_irq_chip_data *data = d;
  163. const struct regmap_irq_chip *chip = data->chip;
  164. struct regmap *map = data->map;
  165. int ret, i;
  166. bool handled = false;
  167. u32 reg;
  168. if (chip->runtime_pm) {
  169. ret = pm_runtime_get_sync(map->dev);
  170. if (ret < 0) {
  171. dev_err(map->dev, "IRQ thread failed to resume: %d\n",
  172. ret);
  173. pm_runtime_put(map->dev);
  174. return IRQ_NONE;
  175. }
  176. }
  177. /*
  178. * Read in the statuses, using a single bulk read if possible
  179. * in order to reduce the I/O overheads.
  180. */
  181. if (!map->use_single_rw && map->reg_stride == 1 &&
  182. data->irq_reg_stride == 1) {
  183. u8 *buf8 = data->status_reg_buf;
  184. u16 *buf16 = data->status_reg_buf;
  185. u32 *buf32 = data->status_reg_buf;
  186. BUG_ON(!data->status_reg_buf);
  187. ret = regmap_bulk_read(map, chip->status_base,
  188. data->status_reg_buf,
  189. chip->num_regs);
  190. if (ret != 0) {
  191. dev_err(map->dev, "Failed to read IRQ status: %d\n",
  192. ret);
  193. return IRQ_NONE;
  194. }
  195. for (i = 0; i < data->chip->num_regs; i++) {
  196. switch (map->format.val_bytes) {
  197. case 1:
  198. data->status_buf[i] = buf8[i];
  199. break;
  200. case 2:
  201. data->status_buf[i] = buf16[i];
  202. break;
  203. case 4:
  204. data->status_buf[i] = buf32[i];
  205. break;
  206. default:
  207. BUG();
  208. return IRQ_NONE;
  209. }
  210. }
  211. } else {
  212. for (i = 0; i < data->chip->num_regs; i++) {
  213. ret = regmap_read(map, chip->status_base +
  214. (i * map->reg_stride
  215. * data->irq_reg_stride),
  216. &data->status_buf[i]);
  217. if (ret != 0) {
  218. dev_err(map->dev,
  219. "Failed to read IRQ status: %d\n",
  220. ret);
  221. if (chip->runtime_pm)
  222. pm_runtime_put(map->dev);
  223. return IRQ_NONE;
  224. }
  225. }
  226. }
  227. /*
  228. * Ignore masked IRQs and ack if we need to; we ack early so
  229. * there is no race between handling and acknowleding the
  230. * interrupt. We assume that typically few of the interrupts
  231. * will fire simultaneously so don't worry about overhead from
  232. * doing a write per register.
  233. */
  234. for (i = 0; i < data->chip->num_regs; i++) {
  235. data->status_buf[i] &= ~data->mask_buf[i];
  236. if (data->status_buf[i] && chip->ack_base) {
  237. reg = chip->ack_base +
  238. (i * map->reg_stride * data->irq_reg_stride);
  239. ret = regmap_write(map, reg, data->status_buf[i]);
  240. if (ret != 0)
  241. dev_err(map->dev, "Failed to ack 0x%x: %d\n",
  242. reg, ret);
  243. }
  244. }
  245. for (i = 0; i < chip->num_irqs; i++) {
  246. if (data->status_buf[chip->irqs[i].reg_offset /
  247. map->reg_stride] & chip->irqs[i].mask) {
  248. handle_nested_irq(irq_find_mapping(data->domain, i));
  249. handled = true;
  250. }
  251. }
  252. if (chip->runtime_pm)
  253. pm_runtime_put(map->dev);
  254. if (handled)
  255. return IRQ_HANDLED;
  256. else
  257. return IRQ_NONE;
  258. }
/*
 * irq_domain .map callback: initialise a newly created virtual IRQ for
 * hardware IRQ @hw on this chip. Chip data must be set before the chip
 * so the callbacks installed with the chip can immediately use it.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	/* Handlers are invoked from the primary IRQ's thread context */
	irq_set_nested_thread(virq, 1);

	/* ARM needs us to explicitly flag the IRQ as valid
	 * and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	irq_set_noprobe(virq);
#endif

	return 0;
}
  275. static struct irq_domain_ops regmap_domain_ops = {
  276. .map = regmap_irq_map,
  277. .xlate = irq_domain_xlate_twocell,
  278. };
/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * @map:       The regmap for the device.
 * @irq:       The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base:  Allocate descriptors starting at this IRQ number if > 0,
 *             creating a legacy domain; otherwise a linear domain is used.
 * @chip:      Configuration for the interrupt controller.
 * @data:      Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache. The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;

	/* Validate every IRQ descriptor against the map's stride and
	 * the declared number of registers before allocating anything. */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	/* Caller asked for a fixed descriptor range (legacy domain) */
	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	*data = d;

	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	/* Wake mask cache is only needed if the chip has wake registers */
	if (chip->wake_base) {
		d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	/* Same condition as the bulk-read fast path in the IRQ thread */
	if (!map->use_single_rw && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc(map->format.val_bytes *
					    chip->num_regs, GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	/* Per-register union of every IRQ's mask bits */
	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		/* mask_invert: a set bit enables rather than masks */
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && chip->ack_base) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			/* wake_invert: cleared bits mean wake disabled */
			if (chip->wake_invert)
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_update_bits(map, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
  453. /**
  454. * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
  455. *
  456. * @irq: Primary IRQ for the device
  457. * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
  458. */
  459. void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
  460. {
  461. if (!d)
  462. return;
  463. free_irq(irq, d);
  464. /* We should unmap the domain but... */
  465. kfree(d->wake_buf);
  466. kfree(d->mask_buf_def);
  467. kfree(d->mask_buf);
  468. kfree(d->status_reg_buf);
  469. kfree(d->status_buf);
  470. kfree(d);
  471. }
  472. EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
  473. /**
  474. * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
  475. *
  476. * Useful for drivers to request their own IRQs.
  477. *
  478. * @data: regmap_irq controller to operate on.
  479. */
  480. int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
  481. {
  482. WARN_ON(!data->irq_base);
  483. return data->irq_base;
  484. }
  485. EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
  486. /**
  487. * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
  488. *
  489. * Useful for drivers to request their own IRQs.
  490. *
  491. * @data: regmap_irq controller to operate on.
  492. * @irq: index of the interrupt requested in the chip IRQs
  493. */
  494. int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
  495. {
  496. /* Handle holes in the IRQ list */
  497. if (!data->chip->irqs[irq].mask)
  498. return -EINVAL;
  499. return irq_create_mapping(data->domain, irq);
  500. }
  501. EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
  502. /**
  503. * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
  504. *
  505. * Useful for drivers to request their own IRQs and for integration
  506. * with subsystems. For ease of integration NULL is accepted as a
  507. * domain, allowing devices to just call this even if no domain is
  508. * allocated.
  509. *
  510. * @data: regmap_irq controller to operate on.
  511. */
  512. struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
  513. {
  514. if (data)
  515. return data->domain;
  516. else
  517. return NULL;
  518. }
  519. EXPORT_SYMBOL_GPL(regmap_irq_get_domain);