/* irq.c: FRV IRQ handling
 *
 * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irc-regs.h>
#include <asm/irq-routing.h>
#include <asm/gdb-stub.h>

extern void __init fpga_init(void);
extern void __init route_mb93493_irqs(void);

static void register_irq_proc(unsigned int irq);

/*
 * Special irq handlers.
 */
irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

atomic_t irq_err_count;

/*
 * Generic, controller-independent functions:
 */
int show_interrupts(struct seq_file *p, void *v)
{
	struct irqaction *action;
	struct irq_group *group;
	unsigned long flags;
	int level, grp, ix, i, j;

	i = *(loff_t *) v;

	switch (i) {
	case 0:
		seq_printf(p, " ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d ", j);
		seq_putc(p, '\n');
		break;

	case 1 ... NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP:
		local_irq_save(flags);

		grp = (i - 1) / NR_IRQ_ACTIONS_PER_GROUP;
		group = irq_groups[grp];
		if (!group)
			goto skip;

		ix = (i - 1) % NR_IRQ_ACTIONS_PER_GROUP;
		action = group->actions[ix];
		if (!action)
			goto skip;

		seq_printf(p, "%3d: ", i - 1);

#ifndef CONFIG_SMP
		/* index with i - 1 to match the IRQ number printed above and
		 * the SMP branch below */
		seq_printf(p, "%10u ", kstat_irqs(i - 1));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
#endif

		level = group->sources[ix]->level - frv_irq_levels;

		seq_printf(p, " %12s@%x", group->sources[ix]->muxname, level);
		seq_printf(p, " %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');

	skip:
		local_irq_restore(flags);
		break;

	case NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP + 1:
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
		break;

	default:
		break;
	}

	return 0;
}
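
/*
 * For illustration only: given the seq_printf() formats above, a
 * /proc/interrupts line for a hypothetical device "ttyS0" on IRQ 13,
 * muxed through a source named "serial" at level 0xd, would come out
 * roughly as
 *
 *	 13:       1204       serial@d ttyS0
 *
 * (exact field widths depend on the format strings compiled in).
 */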
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	unsigned long flags;
	int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1);

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[idx];
	if (!source)
		BUG();

	level = source->level;

	spin_lock_irqsave(&level->lock, flags);

	if (group->control) {
		if (!group->disable_cnt[idx]++)
			group->control(group, idx, 0);
	} else if (!level->disable_count++) {
		__set_MASK(level - frv_irq_levels);
	}

	spin_unlock_irqrestore(&level->lock, flags);
}

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

#ifdef CONFIG_SMP
	if (!local_irq_count(smp_processor_id())) {
		do {
			barrier();
		} while (irq_desc[irq].status & IRQ_INPROGRESS);
	}
#endif
}
/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq(). If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	unsigned long flags;
	int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1);
	int count;

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[idx];
	if (!source)
		BUG();

	level = source->level;

	spin_lock_irqsave(&level->lock, flags);

	if (group->control)
		count = group->disable_cnt[idx];
	else
		count = level->disable_count;

	switch (count) {
	case 1:
		if (group->control) {
			if (group->actions[idx])
				group->control(group, idx, 1);
		} else {
			if (level->usage)
				__clr_MASK(level - frv_irq_levels);
		}
		/* fall-through */

	default:
		count--;
		break;

	case 0:
		printk("enable_irq(%u) unbalanced from %p\n",
		       irq, __builtin_return_address(0));
	}

	if (group->control)
		group->disable_cnt[idx] = count;
	else
		level->disable_count = count;

	spin_unlock_irqrestore(&level->lock, flags);
}
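
/*
 * Usage sketch, for illustration only: "struct foo_device" and
 * foo_reprogram() are hypothetical.  Disables nest, so every
 * disable_irq()/disable_irq_nosync() call must be balanced by exactly one
 * enable_irq() before the line is actually unmasked again.
 */
#if 0
static void foo_reconfigure(struct foo_device *foo)
{
	disable_irq(foo->irq);		/* mask and wait for running handlers */
	foo_reprogram(foo);		/* no foo interrupts can arrive here */
	enable_irq(foo->irq);		/* matching enable unmasks the line */
}
#endif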
/*****************************************************************************/
/*
 * handles all normal device IRQ's
 * - registers are referred to by the __frame variable (GR28)
 * - IRQ distribution is complicated in this arch because of the many PICs, the
 *   way they work and the way they cascade
 */
asmlinkage void do_IRQ(void)
{
	struct irq_source *source;
	int level, cpu;

	level = (__frame->tbr >> 4) & 0xf;
	cpu = smp_processor_id();

#if 0
	{
		static u32 irqcount;
		*(volatile u32 *) 0xe1200004 = ~((irqcount++ << 8) | level);
		*(volatile u16 *) 0xffc00100 = (u16) ~0x9999;
		mb();
	}
#endif

	if ((unsigned long) __frame - (unsigned long) (current + 1) < 512)
		BUG();

	__set_MASK(level);
	__clr_RC(level);
	__clr_IRL();

	kstat_this_cpu.irqs[level]++;

	irq_enter();

	for (source = frv_irq_levels[level].sources; source; source = source->next)
		source->doirq(source);

	irq_exit();

	__clr_MASK(level);

	/* only process softirqs if we didn't interrupt another interrupt handler */
	if ((__frame->psr & PSR_PIL) == PSR_PIL_0)
		if (local_softirq_pending())
			do_softirq();

#ifdef CONFIG_PREEMPT
	local_irq_disable();
	while (--current->preempt_count == 0) {
		if (!(__frame->psr & PSR_S) ||
		    current->need_resched == 0 ||
		    in_interrupt())
			break;
		current->preempt_count++;
		local_irq_enable();
		preempt_schedule();
		local_irq_disable();
	}
#endif

#if 0
	{
		*(volatile u16 *) 0xffc00100 = (u16) ~0x6666;
		mb();
	}
#endif

} /* end do_IRQ() */
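
/*
 * For illustration only: the level extraction in do_IRQ() is plain bit
 * arithmetic on the trap base register.  If __frame->tbr happened to hold
 * 0x3d0 at that point, then (0x3d0 >> 4) & 0xf == 0xd, i.e. external
 * interrupt level 13; which TBR values can actually occur depends on the
 * CPU's trap vectoring.
 */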
/*****************************************************************************/
/*
 * handles all NMIs when not co-opted by the debugger
 * - registers are referred to by the __frame variable (GR28)
 */
asmlinkage void do_NMI(void)
{
} /* end do_NMI() */
/*****************************************************************************/
/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 *
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char *devname,
		void *dev_id)
{
	int retval;
	struct irqaction *action;

#if 1
	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n",
			       devname, (&irq)[-1]);
	}
#endif

	if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *) kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = CPU_MASK_NONE;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	return retval;
}
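
/*
 * Usage sketch, for illustration only: "struct foo_device", foo_irq_pending(),
 * foo_ack_irq() and IRQ 3 are hypothetical.  The handler must match the
 * prototype request_irq() takes above, and a shared line needs a non-NULL
 * dev_id.
 */
#if 0
static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct foo_device *foo = dev_id;	/* the cookie passed below */

	if (!foo_irq_pending(foo))
		return IRQ_NONE;		/* not ours - the line is shared */

	foo_ack_irq(foo);			/* clear the board's interrupt */
	return IRQ_HANDLED;
}

static int foo_attach(struct foo_device *foo)
{
	return request_irq(3, foo_interrupt, SA_SHIRQ, "foo", foo);
}
#endif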
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function may be called from interrupt context.
 *
 *	Bugs: Attempting to free an irq in a handler for the same irq hangs
 *	      the machine.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	struct irqaction **p, **pp;
	unsigned long flags;

	if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS)
		return;

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
	if (!source)
		BUG();

	level = source->level;
	p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];

	spin_lock_irqsave(&level->lock, flags);

	for (pp = p; *pp; pp = &(*pp)->next) {
		struct irqaction *action = *pp;

		if (action->dev_id != dev_id)
			continue;

		/* found it - remove from the list of entries */
		*pp = action->next;

		level->usage--;

		if (p == pp && group->control)
			group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 0);

		if (level->usage == 0)
			__set_MASK(level - frv_irq_levels);

		spin_unlock_irqrestore(&level->lock, flags);
#ifdef CONFIG_SMP
		/* Wait to make sure it's not being used on another CPU
		 * (same test as in disable_irq() above) */
		while (irq_desc[irq].status & IRQ_INPROGRESS)
			barrier();
#endif
		kfree(action);
		return;
	}
}
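
/*
 * Usage sketch, for illustration only: "struct foo_device" and IRQ 3 are
 * hypothetical.  The dev_id passed to free_irq() must be the same cookie
 * that was given to request_irq(), otherwise the action-list walk above
 * will not find the handler to remove.
 */
#if 0
static void foo_detach(struct foo_device *foo)
{
	free_irq(3, foo);		/* same IRQ and cookie as when requested */
}
#endif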
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that comes in on to an
 * unassigned IRQ will cause GxICR_DETECT to be set
 */

static DECLARE_MUTEX(probe_sem);

/**
 *	probe_irq_on - begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 *
 */
unsigned long probe_irq_on(void)
{
	down(&probe_sem);
	return 0;
}

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@xmask: mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the IRQs even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long xmask)
{
	up(&probe_sem);
	return 0;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off - end an interrupt autodetect
 *	@xmask: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */
int probe_irq_off(unsigned long xmask)
{
	up(&probe_sem);
	return -1;
}
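
/*
 * Usage sketch of the autoprobe sequence, for illustration only:
 * foo_trigger_interrupt() is hypothetical.  Note that on this arch the
 * probe functions are stubs - probe_irq_on() returns 0 and probe_irq_off()
 * returns -1 - so drivers cannot rely on autodetection actually working
 * here; the calls merely serialise probers against each other via probe_sem.
 */
#if 0
static int foo_find_irq(struct foo_device *foo)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* takes probe_sem */
	foo_trigger_interrupt(foo);	/* make the hardware raise its IRQ */
	udelay(100);			/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);	/* releases probe_sem */

	return irq > 0 ? irq : -ENODEV;
}
#endif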
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	struct irqaction **p, **pp;
	unsigned long flags;

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
	if (!source)
		BUG();

	level = source->level;
	p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a
		 * problem?  Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/* must juggle the interrupt processing stuff with interrupts disabled */
	spin_lock_irqsave(&level->lock, flags);

	/* can't share interrupts unless all parties agree to */
	if (level->usage != 0 && !(level->flags & new->flags & SA_SHIRQ)) {
		spin_unlock_irqrestore(&level->lock, flags);
		return -EBUSY;
	}

	/* add new interrupt at end of irq queue */
	pp = p;
	while (*pp)
		pp = &(*pp)->next;

	*pp = new;

	level->usage++;
	level->flags = new->flags;

	/* turn the interrupts on */
	if (level->usage == 1)
		__clr_MASK(level - frv_irq_levels);

	if (p == pp && group->control)
		group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 1);

	spin_unlock_irqrestore(&level->lock, flags);

	register_irq_proc(irq);
	return 0;
}
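
/*
 * Usage sketch, for illustration only: setup_irq() is typically called with
 * a statically allocated irqaction for core system interrupts (a timer tick
 * being the classic case elsewhere in the kernel).  "foo_timer_interrupt"
 * and IRQ 0 are hypothetical here.
 */
#if 0
static irqreturn_t foo_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	/* ... advance the system clock, etc ... */
	return IRQ_HANDLED;
}

static struct irqaction foo_timer_irq = {
	.handler	= foo_timer_interrupt,
	.flags		= SA_INTERRUPT,		/* run with local IRQs disabled */
	.name		= "timer",
};

static void __init foo_time_init(void)
{
	setup_irq(0, &foo_timer_irq);		/* no kmalloc, unlike request_irq() */
}
#endif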
static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NR_IRQS];

#define HEX_DIGITS 8

static unsigned int parse_hex_value(const char *buffer,
				    unsigned long count, unsigned long *ret)
{
	unsigned char hexnum[HEX_DIGITS];
	unsigned long value;
	int i;

	if (!count)
		return -EINVAL;
	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first 8 characters as a hex string, any non-hex char
	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;

	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
		case '0' ... '9': c -= '0'; break;
		case 'a' ... 'f': c -= 'a' - 10; break;
		case 'A' ... 'F': c -= 'A' - 10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}
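
/*
 * For illustration only: with the parser above, writing "00E1\n" or "e1" to
 * one of the proc files below yields *ret == 0xe1 in both cases - the '\n'
 * is a non-hex character and simply ends the parse - and anything beyond
 * the first HEX_DIGITS (8) characters is ignored.
 */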
static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
				   int count, int *eof, void *data)
{
	unsigned long *mask = (unsigned long *) data;

	if (count < HEX_DIGITS + 1)
		return -EINVAL;

	return sprintf(page, "%08lx\n", *mask);
}

static int prof_cpu_mask_write_proc(struct file *file, const char *buffer,
				    unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data, full_count = count, err;
	unsigned long new_value;

	show_state();

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc(unsigned int irq)
{
	char name[MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
}
unsigned long prof_cpu_mask = -1;

void init_irq_proc(void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
	if (!entry)
		return;

	entry->nlink = 1;
	entry->data = (void *) &prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}
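
/*
 * For illustration only: after init_irq_proc() has run, the proc tree looks
 * roughly like
 *
 *	/proc/irq/
 *	/proc/irq/prof_cpu_mask		(hex mask, reads as "ffffffff" by default)
 *	/proc/irq/0/ ... /proc/irq/<NR_IRQS - 1>/
 *
 * The per-IRQ directories are left empty by register_irq_proc() above; no
 * smp_affinity files are created on this arch.
 */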
/*****************************************************************************/
/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
	route_cpu_irqs();
	fpga_init();
#ifdef CONFIG_FUJITSU_MB93493
	route_mb93493_irqs();
#endif
} /* end init_IRQ() */