irq.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722
/*
 * linux/arch/arm/kernel/irq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 * 'Borrowed' for ARM26 and (C) 2003 Ian Molton.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
  21. #include <linux/module.h>
  22. #include <linux/ptrace.h>
  23. #include <linux/kernel_stat.h>
  24. #include <linux/signal.h>
  25. #include <linux/sched.h>
  26. #include <linux/ioport.h>
  27. #include <linux/interrupt.h>
  28. #include <linux/slab.h>
  29. #include <linux/random.h>
  30. #include <linux/smp.h>
  31. #include <linux/init.h>
  32. #include <linux/seq_file.h>
  33. #include <linux/errno.h>
  34. #include <asm/irq.h>
  35. #include <asm/system.h>
  36. #include <asm/irqchip.h>
  37. //FIXME - this ought to be in a header IMO
  38. void __init arc_init_irq(void);
/*
 * Maximum IRQ count. Currently, this is arbitrary. However, it should
 * not be set too low to prevent false triggering. Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * FIXME Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT 100000

/* Count of spurious/unhandled interrupts, reported via show_interrupts(). */
static volatile unsigned long irq_err_count;

/* Protects irq_desc[] state (depth, enabled, pending, action list, ...). */
static DEFINE_SPINLOCK(irq_controller_lock);

/* One descriptor per IRQ line; initialised in init_IRQ(). */
struct irqdesc irq_desc[NR_IRQS];
/*
 * Dummy mask/unmask handler.
 *
 * Deliberate no-op; used as the ack/mask/unmask operation for chips
 * that need none (and for the bad_chip placeholder).
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}
  56. void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
  57. {
  58. irq_err_count += 1;
  59. printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
  60. }
/* Placeholder chip: all operations are harmless no-ops. */
static struct irqchip bad_chip = {
	.ack	= dummy_mask_unmask_irq,
	.mask	= dummy_mask_unmask_irq,
	.unmask	= dummy_mask_unmask_irq,
};

/*
 * Descriptor used for unclaimed or out-of-range IRQs: routes to
 * do_bad_IRQ and starts disabled (depth = 1).
 */
static struct irqdesc bad_irq_desc = {
	.chip	= &bad_chip,
	.handle	= do_bad_IRQ,
	.depth	= 1,
};
  71. /**
  72. * disable_irq - disable an irq and wait for completion
  73. * @irq: Interrupt to disable
  74. *
  75. * Disable the selected interrupt line. We do this lazily.
  76. *
  77. * This function may be called from IRQ context.
  78. */
  79. void disable_irq(unsigned int irq)
  80. {
  81. struct irqdesc *desc = irq_desc + irq;
  82. unsigned long flags;
  83. spin_lock_irqsave(&irq_controller_lock, flags);
  84. if (!desc->depth++)
  85. desc->enabled = 0;
  86. spin_unlock_irqrestore(&irq_controller_lock, flags);
  87. }
  88. EXPORT_SYMBOL(disable_irq);
  89. void disable_irq_nosync(unsigned int irq) __attribute__((alias("disable_irq")));
  90. EXPORT_SYMBOL(disable_irq_nosync);
  91. /**
  92. * enable_irq - enable interrupt handling on an irq
  93. * @irq: Interrupt to enable
  94. *
  95. * Re-enables the processing of interrupts on this IRQ line.
  96. * Note that this may call the interrupt handler, so you may
  97. * get unexpected results if you hold IRQs disabled.
  98. *
  99. * This function may be called from IRQ context.
  100. */
  101. void enable_irq(unsigned int irq)
  102. {
  103. struct irqdesc *desc = irq_desc + irq;
  104. unsigned long flags;
  105. int pending = 0;
  106. spin_lock_irqsave(&irq_controller_lock, flags);
  107. if (unlikely(!desc->depth)) {
  108. printk("enable_irq(%u) unbalanced from %p\n", irq,
  109. __builtin_return_address(0)); //FIXME bum addresses reported - why?
  110. } else if (!--desc->depth) {
  111. desc->probing = 0;
  112. desc->enabled = 1;
  113. desc->chip->unmask(irq);
  114. pending = desc->pending;
  115. desc->pending = 0;
  116. /*
  117. * If the interrupt was waiting to be processed,
  118. * retrigger it.
  119. */
  120. if (pending)
  121. desc->chip->rerun(irq);
  122. }
  123. spin_unlock_irqrestore(&irq_controller_lock, flags);
  124. }
  125. EXPORT_SYMBOL(enable_irq);
  126. int show_interrupts(struct seq_file *p, void *v)
  127. {
  128. int i = *(loff_t *) v;
  129. struct irqaction * action;
  130. if (i < NR_IRQS) {
  131. action = irq_desc[i].action;
  132. if (!action)
  133. goto out;
  134. seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
  135. seq_printf(p, " %s", action->name);
  136. for (action = action->next; action; action = action->next) {
  137. seq_printf(p, ", %s", action->name);
  138. }
  139. seq_putc(p, '\n');
  140. } else if (i == NR_IRQS) {
  141. show_fiq_list(p, v);
  142. seq_printf(p, "Err: %10lu\n", irq_err_count);
  143. }
  144. out:
  145. return 0;
  146. }
  147. /*
  148. * IRQ lock detection.
  149. *
  150. * Hopefully, this should get us out of a few locked situations.
  151. * However, it may take a while for this to happen, since we need
  152. * a large number if IRQs to appear in the same jiffie with the
  153. * same instruction pointer (or within 2 instructions).
  154. */
  155. static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
  156. {
  157. unsigned long instr_ptr = instruction_pointer(regs);
  158. if (desc->lck_jif == jiffies &&
  159. desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
  160. desc->lck_cnt += 1;
  161. if (desc->lck_cnt > MAX_IRQ_CNT) {
  162. printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
  163. return 1;
  164. }
  165. } else {
  166. desc->lck_cnt = 0;
  167. desc->lck_pc = instruction_pointer(regs);
  168. desc->lck_jif = jiffies;
  169. }
  170. return 0;
  171. }
/*
 * Run every action chained on this IRQ.
 *
 * Called with irq_controller_lock held; the lock is dropped while the
 * handlers run (so they may touch other IRQ state) and re-taken before
 * returning to the flow handler.
 */
static void
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int ret;

	spin_unlock(&irq_controller_lock);

	/* Unless the action demands IRQs stay off, let others in. */
	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable();

	/* OR together the flags of every action that handled the IRQ. */
	status = 0;
	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	/*
	 * Re-acquire with IRQs disabled: a handler above may have
	 * enabled them, and the caller expects them off again.
	 */
	spin_lock_irq(&irq_controller_lock);
}
  191. /*
  192. * This is for software-decoded IRQs. The caller is expected to
  193. * handle the ack, clear, mask and unmask issues.
  194. */
  195. void
  196. do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
  197. {
  198. struct irqaction *action;
  199. const int cpu = smp_processor_id();
  200. desc->triggered = 1;
  201. kstat_cpu(cpu).irqs[irq]++;
  202. action = desc->action;
  203. if (action)
  204. __do_irq(irq, desc->action, regs);
  205. }
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this. Hence the complexity.
 *
 * Edges must never be lost: if the IRQ fires again while it is being
 * serviced (or while disabled), it is recorded in desc->pending and
 * replayed by the loop below or by enable_irq().
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || !desc->enabled))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	/* Service the IRQ, replaying any edges that arrived meanwhile. */
	do {
		struct irqaction *action;

		action = desc->action;
		if (!action)
			break;

		/* A pending edge was masked below; unmask before replaying. */
		if (desc->pending && desc->enabled) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running. Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
  257. /*
  258. * Level-based IRQ handler. Nice and simple.
  259. */
  260. void
  261. do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
  262. {
  263. struct irqaction *action;
  264. const int cpu = smp_processor_id();
  265. desc->triggered = 1;
  266. /*
  267. * Acknowledge, clear _AND_ disable the interrupt.
  268. */
  269. desc->chip->ack(irq);
  270. if (likely(desc->enabled)) {
  271. kstat_cpu(cpu).irqs[irq]++;
  272. /*
  273. * Return with this interrupt masked if no action
  274. */
  275. action = desc->action;
  276. if (action) {
  277. __do_irq(irq, desc->action, regs);
  278. if (likely(desc->enabled &&
  279. !check_irq_lock(desc, irq, regs)))
  280. desc->chip->unmask(irq);
  281. }
  282. }
  283. }
  284. /*
  285. * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
  286. * come via this function. Instead, they should provide their
  287. * own 'handler'
  288. */
  289. asmlinkage void asm_do_IRQ(int irq, struct pt_regs *regs)
  290. {
  291. struct irqdesc *desc = irq_desc + irq;
  292. /*
  293. * Some hardware gives randomly wrong interrupts. Rather
  294. * than crashing, do something sensible.
  295. */
  296. if (irq >= NR_IRQS)
  297. desc = &bad_irq_desc;
  298. irq_enter();
  299. spin_lock(&irq_controller_lock);
  300. desc->handle(irq, desc, regs);
  301. spin_unlock(&irq_controller_lock);
  302. irq_exit();
  303. }
/*
 * Install a flow handler for an IRQ.
 *
 * A NULL handle uninstalls the line (routes it to do_bad_IRQ, masked
 * and disabled).  A chained handler (is_chained) is enabled
 * immediately and excluded from probing.
 */
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	/* A chained handler needs a real chip installed first. */
	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		/* Uninstall: silence the line before switching handlers. */
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->depth = 1;
		desc->enabled = 0;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		/* Chained IRQs aren't requestable/probeable; enable now. */
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
  333. void set_irq_chip(unsigned int irq, struct irqchip *chip)
  334. {
  335. struct irqdesc *desc;
  336. unsigned long flags;
  337. if (irq >= NR_IRQS) {
  338. printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
  339. return;
  340. }
  341. if (chip == NULL)
  342. chip = &bad_chip;
  343. desc = irq_desc + irq;
  344. spin_lock_irqsave(&irq_controller_lock, flags);
  345. desc->chip = chip;
  346. spin_unlock_irqrestore(&irq_controller_lock, flags);
  347. }
  348. int set_irq_type(unsigned int irq, unsigned int type)
  349. {
  350. struct irqdesc *desc;
  351. unsigned long flags;
  352. int ret = -ENXIO;
  353. if (irq >= NR_IRQS) {
  354. printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
  355. return -ENODEV;
  356. }
  357. desc = irq_desc + irq;
  358. if (desc->chip->type) {
  359. spin_lock_irqsave(&irq_controller_lock, flags);
  360. ret = desc->chip->type(irq, type);
  361. spin_unlock_irqrestore(&irq_controller_lock, flags);
  362. }
  363. return ret;
  364. }
  365. void set_irq_flags(unsigned int irq, unsigned int iflags)
  366. {
  367. struct irqdesc *desc;
  368. unsigned long flags;
  369. if (irq >= NR_IRQS) {
  370. printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
  371. return;
  372. }
  373. desc = irq_desc + irq;
  374. spin_lock_irqsave(&irq_controller_lock, flags);
  375. desc->valid = (iflags & IRQF_VALID) != 0;
  376. desc->probe_ok = (iflags & IRQF_PROBE) != 0;
  377. desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
  378. spin_unlock_irqrestore(&irq_controller_lock, flags);
  379. }
/*
 * Attach an irqaction to a line, sharing with existing actions when
 * both sides set IRQF_SHARED.  Returns 0 on success, -EBUSY when the
 * line is claimed and sharing is refused.  Caller owns @new.
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & IRQF_SHARED)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		/* First action on this line: reset state and enable it,
		 * unless the line asked to stay off (noautoenable). */
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->depth = 1;
		if (!desc->noautoenable) {
			desc->depth = 0;
			desc->enabled = 1;
			desc->chip->unmask(irq);
		}
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}
  436. /**
  437. * request_irq - allocate an interrupt line
  438. * @irq: Interrupt line to allocate
  439. * @handler: Function to be called when the IRQ occurs
  440. * @irqflags: Interrupt type flags
  441. * @devname: An ascii name for the claiming device
  442. * @dev_id: A cookie passed back to the handler function
  443. *
  444. * This call allocates interrupt resources and enables the
  445. * interrupt line and IRQ handling. From the point this
  446. * call is made your handler function may be invoked. Since
  447. * your handler function must clear any interrupt the board
  448. * raises, you must take care both to initialise your hardware
  449. * and to set up the interrupt handler in the right order.
  450. *
  451. * Dev_id must be globally unique. Normally the address of the
  452. * device data structure is used as the cookie. Since the handler
  453. * receives this value it makes sense to use it.
  454. *
  455. * If your interrupt is shared you must pass a non NULL dev_id
  456. * as this is required when freeing the interrupt.
  457. *
  458. * Flags:
  459. *
  460. * IRQF_SHARED Interrupt is shared
  461. *
  462. * IRQF_DISABLED Disable local interrupts while processing
  463. *
  464. * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
  465. *
  466. */
  467. //FIXME - handler used to return void - whats the significance of the change?
  468. int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
  469. unsigned long irq_flags, const char * devname, void *dev_id)
  470. {
  471. unsigned long retval;
  472. struct irqaction *action;
  473. if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
  474. (irq_flags & IRQF_SHARED && !dev_id))
  475. return -EINVAL;
  476. action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
  477. if (!action)
  478. return -ENOMEM;
  479. action->handler = handler;
  480. action->flags = irq_flags;
  481. cpus_clear(action->mask);
  482. action->name = devname;
  483. action->next = NULL;
  484. action->dev_id = dev_id;
  485. retval = setup_irq(irq, action);
  486. if (retval)
  487. kfree(action);
  488. return retval;
  489. }
  490. EXPORT_SYMBOL(request_irq);
/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function.
 *
 * This function may be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n",irq);
#ifdef CONFIG_DEBUG_ERRORS
		__backtrace();
#endif
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	/* Walk the action list looking for the matching dev_id cookie. */
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		kfree(action);
		goto out;
	}
	/* No action carried this dev_id: the caller double-freed or
	 * passed the wrong cookie. */
	printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
#ifdef CONFIG_DEBUG_ERRORS
	__backtrace();
#endif
out:
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(free_irq);
/* Start the interrupt probing. Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing is
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		/* Skip lines that are unprobeable or already claimed. */
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		if (irq_desc[i].chip->type)
			irq_desc[i].chip->type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		/* Anything that triggered during the quiet period is
		 * spurious and dropped from the probe set. */
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}
EXPORT_SYMBOL(probe_irq_on);
/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one
	 * that we were probing has been triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			/* More than one fired: the probe is ambiguous. */
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	/* NOTE(review): appears to normalise a -1 sentinel to NO_IRQ;
	 * if NO_IRQ == -1 on this platform this is a no-op — confirm. */
	if (irq_found == -1)
		irq_found = NO_IRQ;
out:
	spin_unlock_irq(&irq_controller_lock);
	return irq_found;
}
EXPORT_SYMBOL(probe_irq_off);
/* No /proc/irq hierarchy on this architecture: intentionally empty. */
void __init init_irq_proc(void)
{
}
  610. void __init init_IRQ(void)
  611. {
  612. struct irqdesc *desc;
  613. extern void init_dma(void);
  614. int irq;
  615. for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++)
  616. *desc = bad_irq_desc;
  617. arc_init_irq();
  618. init_dma();
  619. }