irq.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716
  1. /*
  2. * linux/arch/arm/kernel/irq.c
  3. *
  4. * Copyright (C) 1992 Linus Torvalds
  5. * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
  6. * 'Borrowed' for ARM26 and (C) 2003 Ian Molton.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. *
  12. * This file contains the code used by various IRQ handling routines:
  13. * asking for different IRQ's should be done through these routines
  14. * instead of just grabbing them. Thus setups with different IRQ numbers
  15. * shouldn't result in any weird surprises, and installing new handlers
  16. * should be easier.
  17. *
  18. * IRQ's are in fact implemented a bit like signal handlers for the kernel.
  19. * Naturally it's not a 1:1 relation, but there are similarities.
  20. */
  21. #include <linux/config.h>
  22. #include <linux/module.h>
  23. #include <linux/ptrace.h>
  24. #include <linux/kernel_stat.h>
  25. #include <linux/signal.h>
  26. #include <linux/sched.h>
  27. #include <linux/ioport.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/slab.h>
  30. #include <linux/random.h>
  31. #include <linux/smp.h>
  32. #include <linux/init.h>
  33. #include <linux/seq_file.h>
  34. #include <linux/errno.h>
  35. #include <asm/irq.h>
  36. #include <asm/system.h>
  37. #include <asm/irqchip.h>
  38. //FIXME - this ought to be in a header IMO
  39. void __init arc_init_irq(void);
/*
 * Maximum IRQ count.  Currently, this is arbitrary.  However, it should
 * not be set too low to prevent false triggering.  Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * FIXME Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT 100000

/* Count of spurious/unexpected interrupts; reported by show_interrupts(). */
static volatile unsigned long irq_err_count;

/* Protects irq_desc[] state and serialises all irqchip operations. */
static DEFINE_SPINLOCK(irq_controller_lock);

/* Per-interrupt descriptor table, indexed by IRQ number. */
struct irqdesc irq_desc[NR_IRQS];
  51. /*
  52. * Dummy mask/unmask handler
  53. */
  54. void dummy_mask_unmask_irq(unsigned int irq)
  55. {
  56. }
  57. void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
  58. {
  59. irq_err_count += 1;
  60. printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
  61. }
/*
 * Fallback irqchip whose ack/mask/unmask are all no-ops; installed on
 * descriptors that have no real chip (see set_irq_chip / init_IRQ).
 */
static struct irqchip bad_chip = {
	.ack = dummy_mask_unmask_irq,
	.mask = dummy_mask_unmask_irq,
	.unmask = dummy_mask_unmask_irq,
};
/*
 * Descriptor used for out-of-range or unclaimed IRQs: routed to
 * do_bad_IRQ, backed by the no-op chip, and starting disabled
 * (depth = 1 means one outstanding disable).
 */
static struct irqdesc bad_irq_desc = {
	.chip = &bad_chip,
	.handle = do_bad_IRQ,
	.depth = 1,
};
  72. /**
  73. * disable_irq - disable an irq and wait for completion
  74. * @irq: Interrupt to disable
  75. *
  76. * Disable the selected interrupt line. We do this lazily.
  77. *
  78. * This function may be called from IRQ context.
  79. */
  80. void disable_irq(unsigned int irq)
  81. {
  82. struct irqdesc *desc = irq_desc + irq;
  83. unsigned long flags;
  84. spin_lock_irqsave(&irq_controller_lock, flags);
  85. if (!desc->depth++)
  86. desc->enabled = 0;
  87. spin_unlock_irqrestore(&irq_controller_lock, flags);
  88. }
  89. /**
  90. * enable_irq - enable interrupt handling on an irq
  91. * @irq: Interrupt to enable
  92. *
  93. * Re-enables the processing of interrupts on this IRQ line.
  94. * Note that this may call the interrupt handler, so you may
  95. * get unexpected results if you hold IRQs disabled.
  96. *
  97. * This function may be called from IRQ context.
  98. */
  99. void enable_irq(unsigned int irq)
  100. {
  101. struct irqdesc *desc = irq_desc + irq;
  102. unsigned long flags;
  103. int pending = 0;
  104. spin_lock_irqsave(&irq_controller_lock, flags);
  105. if (unlikely(!desc->depth)) {
  106. printk("enable_irq(%u) unbalanced from %p\n", irq,
  107. __builtin_return_address(0)); //FIXME bum addresses reported - why?
  108. } else if (!--desc->depth) {
  109. desc->probing = 0;
  110. desc->enabled = 1;
  111. desc->chip->unmask(irq);
  112. pending = desc->pending;
  113. desc->pending = 0;
  114. /*
  115. * If the interrupt was waiting to be processed,
  116. * retrigger it.
  117. */
  118. if (pending)
  119. desc->chip->rerun(irq);
  120. }
  121. spin_unlock_irqrestore(&irq_controller_lock, flags);
  122. }
  123. int show_interrupts(struct seq_file *p, void *v)
  124. {
  125. int i = *(loff_t *) v;
  126. struct irqaction * action;
  127. if (i < NR_IRQS) {
  128. action = irq_desc[i].action;
  129. if (!action)
  130. continue;
  131. seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
  132. seq_printf(p, " %s", action->name);
  133. for (action = action->next; action; action = action->next) {
  134. seq_printf(p, ", %s", action->name);
  135. }
  136. seq_putc(p, '\n');
  137. } else if (i == NR_IRQS) {
  138. show_fiq_list(p, v);
  139. seq_printf(p, "Err: %10lu\n", irq_err_count);
  140. }
  141. return 0;
  142. }
  143. /*
  144. * IRQ lock detection.
  145. *
  146. * Hopefully, this should get us out of a few locked situations.
  147. * However, it may take a while for this to happen, since we need
  148. * a large number if IRQs to appear in the same jiffie with the
  149. * same instruction pointer (or within 2 instructions).
  150. */
  151. static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
  152. {
  153. unsigned long instr_ptr = instruction_pointer(regs);
  154. if (desc->lck_jif == jiffies &&
  155. desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
  156. desc->lck_cnt += 1;
  157. if (desc->lck_cnt > MAX_IRQ_CNT) {
  158. printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
  159. return 1;
  160. }
  161. } else {
  162. desc->lck_cnt = 0;
  163. desc->lck_pc = instruction_pointer(regs);
  164. desc->lck_jif = jiffies;
  165. }
  166. return 0;
  167. }
/*
 * Run every action chained on @irq.  Entered with irq_controller_lock
 * held by the caller; the lock is dropped while handlers run and
 * retaken before return.
 *
 * NOTE(review): the lock is released with spin_unlock() but reacquired
 * with spin_lock_irq(), i.e. local IRQs are unconditionally disabled on
 * exit -- this relies on all callers holding the lock with IRQs off.
 */
static void
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int ret;

	spin_unlock(&irq_controller_lock);

	/* Run with interrupts enabled unless the first action demands
	 * them off (SA_INTERRUPT). */
	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		ret = action->handler(irq, action->dev_id, regs);
		/* Accumulate flags only from handlers that claimed the IRQ. */
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		action = action->next;
	} while (action);

	/* Feed the entropy pool if any claiming handler requested it. */
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	spin_lock_irq(&irq_controller_lock);
}
  187. /*
  188. * This is for software-decoded IRQs. The caller is expected to
  189. * handle the ack, clear, mask and unmask issues.
  190. */
  191. void
  192. do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
  193. {
  194. struct irqaction *action;
  195. const int cpu = smp_processor_id();
  196. desc->triggered = 1;
  197. kstat_cpu(cpu).irqs[irq]++;
  198. action = desc->action;
  199. if (action)
  200. __do_irq(irq, desc->action, regs);
  201. }
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity.
 *
 * Edge flow: an edge arriving while the IRQ is disabled or already
 * being serviced cannot simply be re-delivered by the hardware, so it
 * is latched in desc->pending and replayed by the service loop below
 * (or by enable_irq via chip->rerun).
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ.  Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || !desc->enabled))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;

		action = desc->action;
		if (!action)
			break;

		/* A new edge was latched while handlers ran (see the
		 * 'running' path below, which also masked the line):
		 * clear the latch and unmask before re-running. */
		if (desc->pending && desc->enabled) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running.  Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
/*
 * Level-based IRQ handler.  Nice and simple: ack (which also masks the
 * line), run the actions, and unmask again only if the line is still
 * enabled and not detected as stuck.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * Acknowledge, clear _AND_ disable the interrupt.
	 */
	desc->chip->ack(irq);

	if (likely(desc->enabled)) {
		kstat_cpu(cpu).irqs[irq]++;

		/*
		 * Return with this interrupt masked if no action
		 */
		action = desc->action;
		if (action) {
			__do_irq(irq, desc->action, regs);

			/* Re-enable only if a handler did not disable the
			 * line and it is not spinning (check_irq_lock). */
			if (likely(desc->enabled &&
			    !check_irq_lock(desc, irq, regs)))
				desc->chip->unmask(irq);
		}
	}
}
  280. /*
  281. * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
  282. * come via this function. Instead, they should provide their
  283. * own 'handler'
  284. */
  285. asmlinkage void asm_do_IRQ(int irq, struct pt_regs *regs)
  286. {
  287. struct irqdesc *desc = irq_desc + irq;
  288. /*
  289. * Some hardware gives randomly wrong interrupts. Rather
  290. * than crashing, do something sensible.
  291. */
  292. if (irq >= NR_IRQS)
  293. desc = &bad_irq_desc;
  294. irq_enter();
  295. spin_lock(&irq_controller_lock);
  296. desc->handle(irq, desc, regs);
  297. spin_unlock(&irq_controller_lock);
  298. irq_exit();
  299. }
/*
 * Install @handle as the flow handler for @irq.  A NULL handle
 * reinstalls do_bad_IRQ, which also masks and disables the line.
 * When @is_chained, the IRQ is a cascade input: it is marked
 * invalid/unprobeable and unmasked immediately.
 */
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	/* A chained handler on the no-op chip can never be delivered. */
	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		/* Shut the line down before routing it to the bad handler. */
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->depth = 1;
		desc->enabled = 0;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
  329. void set_irq_chip(unsigned int irq, struct irqchip *chip)
  330. {
  331. struct irqdesc *desc;
  332. unsigned long flags;
  333. if (irq >= NR_IRQS) {
  334. printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
  335. return;
  336. }
  337. if (chip == NULL)
  338. chip = &bad_chip;
  339. desc = irq_desc + irq;
  340. spin_lock_irqsave(&irq_controller_lock, flags);
  341. desc->chip = chip;
  342. spin_unlock_irqrestore(&irq_controller_lock, flags);
  343. }
  344. int set_irq_type(unsigned int irq, unsigned int type)
  345. {
  346. struct irqdesc *desc;
  347. unsigned long flags;
  348. int ret = -ENXIO;
  349. if (irq >= NR_IRQS) {
  350. printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
  351. return -ENODEV;
  352. }
  353. desc = irq_desc + irq;
  354. if (desc->chip->type) {
  355. spin_lock_irqsave(&irq_controller_lock, flags);
  356. ret = desc->chip->type(irq, type);
  357. spin_unlock_irqrestore(&irq_controller_lock, flags);
  358. }
  359. return ret;
  360. }
  361. void set_irq_flags(unsigned int irq, unsigned int iflags)
  362. {
  363. struct irqdesc *desc;
  364. unsigned long flags;
  365. if (irq >= NR_IRQS) {
  366. printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
  367. return;
  368. }
  369. desc = irq_desc + irq;
  370. spin_lock_irqsave(&irq_controller_lock, flags);
  371. desc->valid = (iflags & IRQF_VALID) != 0;
  372. desc->probe_ok = (iflags & IRQF_PROBE) != 0;
  373. desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
  374. spin_unlock_irqrestore(&irq_controller_lock, flags);
  375. }
/*
 * Register @new on @irq's action chain.  Returns -EBUSY when the line
 * is already in use and either side refuses sharing (SA_SHIRQ); 0 on
 * success.  On first registration the descriptor state is reset and,
 * unless noautoenable is set, the line is enabled and unmasked.
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		/* First action on this line: reset descriptor state. */
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->depth = 1;
		if (!desc->noautoenable) {
			desc->depth = 0;
			desc->enabled = 1;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}
  432. /**
  433. * request_irq - allocate an interrupt line
  434. * @irq: Interrupt line to allocate
  435. * @handler: Function to be called when the IRQ occurs
  436. * @irqflags: Interrupt type flags
  437. * @devname: An ascii name for the claiming device
  438. * @dev_id: A cookie passed back to the handler function
  439. *
  440. * This call allocates interrupt resources and enables the
  441. * interrupt line and IRQ handling. From the point this
  442. * call is made your handler function may be invoked. Since
  443. * your handler function must clear any interrupt the board
  444. * raises, you must take care both to initialise your hardware
  445. * and to set up the interrupt handler in the right order.
  446. *
  447. * Dev_id must be globally unique. Normally the address of the
  448. * device data structure is used as the cookie. Since the handler
  449. * receives this value it makes sense to use it.
  450. *
  451. * If your interrupt is shared you must pass a non NULL dev_id
  452. * as this is required when freeing the interrupt.
  453. *
  454. * Flags:
  455. *
  456. * SA_SHIRQ Interrupt is shared
  457. *
  458. * SA_INTERRUPT Disable local interrupts while processing
  459. *
  460. * SA_SAMPLE_RANDOM The interrupt can be used for entropy
  461. *
  462. */
  463. //FIXME - handler used to return void - whats the significance of the change?
  464. int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
  465. unsigned long irq_flags, const char * devname, void *dev_id)
  466. {
  467. unsigned long retval;
  468. struct irqaction *action;
  469. if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
  470. (irq_flags & SA_SHIRQ && !dev_id))
  471. return -EINVAL;
  472. action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
  473. if (!action)
  474. return -ENOMEM;
  475. action->handler = handler;
  476. action->flags = irq_flags;
  477. cpus_clear(action->mask);
  478. action->name = devname;
  479. action->next = NULL;
  480. action->dev_id = dev_id;
  481. retval = setup_irq(irq, action);
  482. if (retval)
  483. kfree(action);
  484. return retval;
  485. }
  486. EXPORT_SYMBOL(request_irq);
/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler.  The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function.
 *
 * This function may be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n",irq);
#ifdef CONFIG_DEBUG_ERRORS
		__backtrace();
#endif
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	/* Walk the action chain looking for the entry with this dev_id. */
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		kfree(action);
		goto out;
	}
	/* Fell off the chain: no action with this dev_id was registered. */
	printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
#ifdef CONFIG_DEBUG_ERRORS
	__backtrace();
#endif
out:
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

EXPORT_SYMBOL(free_irq);
/* Start the interrupt probing.  Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing is
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		/* Switch the line into probe trigger mode if supported. */
		if (irq_desc[i].chip->type)
			irq_desc[i].chip->type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 * (deliberate busy-wait: probing happens at boot, pre-scheduler)
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		/* Anything that fired during the delay -- before the caller
		 * provoked an interrupt -- is spurious; drop it. */
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}

EXPORT_SYMBOL(probe_irq_on);
/*
 * Possible return values:
 *   >= 0 - interrupt number
 *     -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one
	 * that we were probing has been triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			/* A second hit makes the probe ambiguous: give up. */
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	/* NOTE(review): this is a no-op when NO_IRQ == -1; it only
	 * matters if NO_IRQ differs from -1 on this platform -- confirm. */
	if (irq_found == -1)
		irq_found = NO_IRQ;
out:
	spin_unlock_irq(&irq_controller_lock);

	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
/* Nothing to set up for /proc IRQ support here; stub kept so generic
 * code can call it unconditionally. */
void __init init_irq_proc(void)
{
}
  606. void __init init_IRQ(void)
  607. {
  608. struct irqdesc *desc;
  609. extern void init_dma(void);
  610. int irq;
  611. for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++)
  612. *desc = bad_irq_desc;
  613. arc_init_irq();
  614. init_dma();
  615. }