irq.c

/*
 * Copyright (C) 2011 Texas Instruments Incorporated
 *
 * This borrows heavily from the powerpc version, which is:
 *
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/debugfs.h>	/* for the CONFIG_VIRQ_DEBUG code at the end */

#include <asm/megamod-pic.h>

unsigned long irq_err_count;

static DEFINE_RAW_SPINLOCK(core_irq_lock);

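/*
 * The core PIC controls the C64x+ priority interrupts through the IER
 * (Interrupt Enable Register) control register: masking clears the
 * priority's enable bit, unmasking sets it. core_irq_lock serializes
 * the read-modify-write cycles on IER.
 */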
static void mask_core_irq(struct irq_data *data)
{
	unsigned int prio = data->irq;

	BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS);

	raw_spin_lock(&core_irq_lock);
	and_creg(IER, ~(1 << prio));
	raw_spin_unlock(&core_irq_lock);
}

static void unmask_core_irq(struct irq_data *data)
{
	unsigned int prio = data->irq;

	raw_spin_lock(&core_irq_lock);
	or_creg(IER, 1 << prio);
	raw_spin_unlock(&core_irq_lock);
}

static struct irq_chip core_chip = {
	.name		= "core",
	.irq_mask	= mask_core_irq,
	.irq_unmask	= unmask_core_irq,
};

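/*
 * Called from the low-level interrupt dispatcher with the hardware
 * priority of the pending interrupt. Priorities 0-3 (reset, NMI and
 * the reserved events on C64x+) never take this path, hence the BUG_ON.
 */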
asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS);

	generic_handle_irq(prio);

	irq_exit();

	set_irq_regs(old_regs);
}

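/*
 * The core host maps each hardware priority 1:1 onto the virq of the
 * same number. All priority interrupts are level triggered.
 */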
static struct irq_host *core_host;

static int core_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	if (hw < 4 || hw >= NR_PRIORITY_IRQS)
		return -EINVAL;

	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &core_chip, handle_level_irq);
	return 0;
}

static struct irq_host_ops core_host_ops = {
	.map = core_host_map,
};

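/*
 * Boot-time interrupt setup: mask all maskable priorities, register the
 * core controller from the device tree, bring up the SoC-level
 * megamodule PIC, and finally clear any interrupt flags that were
 * latched while everything was masked.
 */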
void __init init_IRQ(void)
{
	struct device_node *np;

	/* Mask all priority IRQs */
	and_creg(IER, ~0xfff0);

	np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic");
	if (np != NULL) {
		/* create the core host */
		core_host = irq_alloc_host(np, IRQ_HOST_MAP_PRIORITY, 0,
					   &core_host_ops, 0);
		if (core_host)
			irq_set_default_host(core_host);
		of_node_put(np);
	}

	printk(KERN_INFO "Core interrupt controller initialized\n");

	/* now we're ready for other SoC controllers */
	megamod_pic_init();

	/* Clear all general IRQ flags */
	set_creg(ICR, 0xfff0);
}

void ack_bad_irq(int irq)
{
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
	irq_err_count++;
}

int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
	return 0;
}

/*
 * IRQ controller and virtual interrupts
 */

/* The main irq map itself is an array of NR_IRQS entries containing the
 * associated host and hwirq number. An entry with a host of NULL is free.
 * An entry can be allocated if it's free: the allocator first sets the
 * hwirq to the host's invalid irq number and then fills in the host.
 */
struct irq_map_entry {
	irq_hw_number_t hwirq;
	struct irq_host *host;
};

static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

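/*
 * Translation helpers between Linux virtual irq numbers and the
 * controller-local hwirq numbers recorded in irq_map.
 */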
irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return irq_map[d->irq].hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

bool virq_is_host(unsigned int virq, struct irq_host *host)
{
	return irq_map[virq].host == host;
}
EXPORT_SYMBOL_GPL(virq_is_host);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
	return h->of_node != NULL && h->of_node == np;
}

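/**
 * irq_alloc_host - allocate and register an irq_host
 * @of_node: device node of the interrupt controller
 * @revmap_type: IRQ_HOST_MAP_* reverse-mapping strategy
 * @revmap_arg: size of the reverse map for the linear strategy
 * @ops: controller callbacks (at least .map)
 * @inval_irq: hwirq value used to mark a free irq_map entry
 *
 * Only one IRQ_HOST_MAP_PRIORITY host may exist; it claims irq_map[0]
 * as a marker and is immediately mapped 1:1 onto virqs
 * 1..NR_PRIORITY_IRQS-1.
 */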
struct irq_host *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = kzalloc(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* Check for the priority controller. */
	if (revmap_type == IRQ_HOST_MAP_PRIORITY) {
		if (irq_map[0].host != NULL) {
			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
			of_node_put(host->of_node);
			kfree(host);
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setups per revmap type */
	switch (revmap_type) {
	case IRQ_HOST_MAP_PRIORITY:
		/* 0 is always the invalid number for priority */
		host->inval_irq = 0;
		/* setup us as the host for all priority interrupts */
		for (i = 1; i < NR_PRIORITY_IRQS; i++) {
			irq_map[i].hwirq = i;
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			ops->map(host, i, i);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	case IRQ_HOST_MAP_TREE:
		INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}

struct irq_host *irq_find_host(struct device_node *node)
{
	struct irq_host *h, *found = NULL;
	unsigned long flags;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * though.
	 */
	raw_spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link)
		if (h->ops->match(h, node)) {
			found = h;
			break;
		}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);
	irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NR_PRIORITY_IRQS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}

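/*
 * Bind a freshly allocated virq to its hwirq: reserve the irq_desc,
 * record the hwirq in irq_map and let the controller's .map callback
 * install its chip and flow handler.
 */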
static int irq_setup_virq(struct irq_host *host, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	int res;

	res = irq_alloc_desc_at(virq, 0);
	if (res != virq) {
		pr_debug("irq: -> allocating desc failed\n");
		goto error;
	}

	/* map it */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();

	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		goto errdesc;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;

errdesc:
	irq_free_descs(virq, 1);
error:
	irq_free_virt(virq, 1);
	return -1;
}

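/*
 * For IRQ_HOST_MAP_NOMAP hosts there is no fixed hwirq space, so the
 * allocated virq number doubles as the hwirq.
 */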
unsigned int irq_create_direct_mapping(struct irq_host *host)
{
	unsigned int virq;

	if (host == NULL)
		host = irq_default_host;

	BUG_ON(host == NULL);
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

	virq = irq_alloc_virt(host, 1, 0);
	if (virq == NO_IRQ) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return NO_IRQ;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	if (irq_setup_virq(host, virq, virq))
		return NO_IRQ;

	return virq;
}

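/*
 * Map a hwirq on a given (or the default) host to a virq, reusing an
 * existing mapping when one is already installed. The hwirq itself is
 * used as an allocation hint so that, where possible, virq == hwirq.
 */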
unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if mapping already exists */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	hint = hwirq % irq_virq_count;
	virq = irq_alloc_virt(host, 1, hint);
	if (virq == NO_IRQ) {
		pr_debug("irq: -> virq allocation failed\n");
		return NO_IRQ;
	}

	if (irq_setup_virq(host, virq, hwirq))
		return NO_IRQ;

	pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
		 hwirq, host->of_node ? host->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

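/*
 * Device-tree entry point: translate an interrupt specifier via the
 * host's .xlate callback (or take the first cell verbatim), create the
 * mapping and apply the trigger type if one was specified.
 */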
unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	if (controller == NULL)
		host = irq_default_host;
	else
		host = irq_find_host(controller);
	if (host == NULL) {
		/* controller may be NULL when falling back to the default host */
		printk(KERN_WARNING "irq: no irq host found for %s !\n",
		       controller ? controller->full_name : "NULL node");
		return NO_IRQ;
	}

	/* If host has no translation, then we assume interrupt line */
	if (host->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (host->ops->xlate(host, controller, intspec, intsize,
				     &hwirq, &type))
			return NO_IRQ;
	}

	/* Create mapping */
	virq = irq_create_mapping(host, hwirq);
	if (virq == NO_IRQ)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != irqd_get_trigger_type(irq_get_irq_data(virq)))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

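/*
 * Tear down a mapping created above: disable requests, detach the chip
 * and handler, notify the controller and scrub the reverse map. The
 * priority interrupts set up at boot are permanent and never unmapped.
 */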
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ)
		return;

	/* Never unmap priority interrupts */
	if (virq < NR_PRIORITY_IRQS)
		return;

	host = irq_map[virq].host;
	if (WARN_ON(host == NULL))
		return;

	irq_set_status_flags(virq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch (host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_HOST_MAP_TREE:
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	irq_free_descs(virq, 1);

	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

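/*
 * Slow-path reverse lookup: linearly scan irq_map for a (host, hwirq)
 * pair, starting at the hint so the common virq == hwirq case is hit
 * on the first probe.
 */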
unsigned int irq_find_mapping(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;

	/*
	 * A hint below 4 can never be reached again after the scan
	 * below wraps back to 4, which would make the do/while spin
	 * forever. Probe the hinted slot directly (entries 1..3 only
	 * ever belong to the priority host with identity hwirqs) and
	 * then clamp the hint into the wrap range.
	 */
	if (hint < 4) {
		if (irq_map[hint].host == host &&
		    irq_map[hint].hwirq == hwirq)
			return hint;
		hint = 4;
	}

	/* Slow path does a linear search of the map */
	i = hint;
	do {
		if (irq_map[i].host == host &&
		    irq_map[i].hwirq == hwirq)
			return i;
		i++;
		if (i >= irq_virq_count)
			i = 4;
	} while (i != hint);
	return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

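/*
 * Fast reverse lookup for radix-tree hosts. The tree stores pointers
 * into the static irq_map array, so the virq is recovered by pointer
 * arithmetic; a missing node falls back to the linear scan.
 */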
unsigned int irq_radix_revmap_lookup(struct irq_host *host,
				     irq_hw_number_t hwirq)
{
	struct irq_map_entry *ptr;
	unsigned int virq;

	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
		return irq_find_mapping(host, hwirq);

	/*
	 * The ptr returned references the static global irq_map,
	 * but freeing an irq can delete nodes along the path to
	 * do the lookup via call_rcu.
	 */
	rcu_read_lock();
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
	rcu_read_unlock();

	/*
	 * If found in radix tree, then fine.
	 * Else fallback to linear lookup - this should not happen in practice
	 * as it means that we failed to insert the node in the radix tree.
	 */
	if (ptr)
		virq = ptr - irq_map;
	else
		virq = irq_find_mapping(host, hwirq);

	return virq;
}

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq,
				  &irq_map[virq]);
		mutex_unlock(&revmap_trees_mutex);
	}
}

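/*
 * Fast reverse lookup for linear hosts: a direct table index, with the
 * table filled lazily from the slow path on first miss.
 */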
unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
		return irq_find_mapping(host, hwirq);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}

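/*
 * Reserve @count consecutive free irq_map entries for @host. The hint
 * is honoured for single allocations so virq numbers can match hwirq
 * numbers; entries below NR_PRIORITY_IRQS are never handed out.
 */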
unsigned int irq_alloc_virt(struct irq_host *host,
			    unsigned int count,
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;

	if (count == 0 || count > (irq_virq_count - NR_PRIORITY_IRQS))
		return NO_IRQ;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NR_PRIORITY_IRQS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space
	 */
	for (i = NR_PRIORITY_IRQS, j = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host != NULL)
			j = 0;
		else
			j++;

		if (j == count) {
			found = i - count + 1;
			break;
		}
	}
	if (found == NO_IRQ) {
		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
hint_found:
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}

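/*
 * Release a range of virqs back to the allocator, clipping the range
 * so it can never touch the priority interrupts or run past the end
 * of the allocatable space.
 */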
void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON(virq < NR_PRIORITY_IRQS);
	WARN_ON(count == 0 || (virq + count) > irq_virq_count);

	if (virq < NR_PRIORITY_IRQS) {
		if (virq + count < NR_PRIORITY_IRQS)
			return;
		count -= NR_PRIORITY_IRQS - virq;
		virq = NR_PRIORITY_IRQS;
	}

	if (count > irq_virq_count || virq > irq_virq_count - count) {
		if (virq > irq_virq_count)
			return;
		count = irq_virq_count - virq;
	}

	raw_spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		host = irq_map[i].host;
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	static const char none[] = "none";
	void *data;
	int i;

	seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
		   "chip name", "chip data", "host name");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			struct irq_chip *chip;

			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05lx ", irq_map[i].hwirq);

			chip = irq_desc_get_chip(desc);
			if (chip && chip->name)
				p = chip->name;
			else
				p = none;
			seq_printf(m, "%-15s ", p);

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, "0x%16p ", data);

			if (irq_map[i].host && irq_map[i].host->of_node)
				p = irq_map[i].host->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	/*
	 * The powerpc original parented this file under
	 * powerpc_debugfs_root, which does not exist on this
	 * architecture; create it at the debugfs root instead.
	 */
	if (debugfs_create_file("virq_mapping", S_IRUGO, NULL,
				NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
device_initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */