irq.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717
  1. /*
  2. * linux/arch/arm/kernel/irq.c
  3. *
  4. * Copyright (C) 1992 Linus Torvalds
  5. * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
  6. * 'Borrowed' for ARM26 and (C) 2003 Ian Molton.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. *
  12. * This file contains the code used by various IRQ handling routines:
  13. * asking for different IRQ's should be done through these routines
  14. * instead of just grabbing them. Thus setups with different IRQ numbers
  15. * shouldn't result in any weird surprises, and installing new handlers
  16. * should be easier.
  17. *
  18. * IRQ's are in fact implemented a bit like signal handlers for the kernel.
  19. * Naturally it's not a 1:1 relation, but there are similarities.
  20. */
  21. #include <linux/config.h>
  22. #include <linux/module.h>
  23. #include <linux/ptrace.h>
  24. #include <linux/kernel_stat.h>
  25. #include <linux/signal.h>
  26. #include <linux/sched.h>
  27. #include <linux/ioport.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/slab.h>
  30. #include <linux/random.h>
  31. #include <linux/smp.h>
  32. #include <linux/init.h>
  33. #include <linux/seq_file.h>
  34. #include <linux/errno.h>
  35. #include <asm/irq.h>
  36. #include <asm/system.h>
  37. #include <asm/irqchip.h>
  38. //FIXME - this ought to be in a header IMO
  39. void __init arc_init_irq(void);
/*
 * Maximum IRQ count. Currently, this is arbitary. However, it should
 * not be set too low to prevent false triggering. Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * FIXME Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT 100000

/* Running count of spurious/unhandled interrupts; shown by show_interrupts(). */
static volatile unsigned long irq_err_count;
/* Single lock protecting all irq_desc[] state and chip callbacks in this file. */
static DEFINE_SPINLOCK(irq_controller_lock);
/* Per-IRQ descriptor table; every slot is reset to bad_irq_desc by init_IRQ(). */
struct irqdesc irq_desc[NR_IRQS];
/*
 * Dummy mask/unmask handler
 *
 * Used by bad_chip so that descriptors without a real controller can
 * still have their ->mask/->unmask/->ack callbacks invoked safely.
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
	/* Intentionally empty: there is no hardware to touch. */
}
  57. void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
  58. {
  59. irq_err_count += 1;
  60. printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
  61. }
/* Placeholder controller: all operations are no-ops (dummy handlers). */
static struct irqchip bad_chip = {
	.ack	= dummy_mask_unmask_irq,
	.mask	= dummy_mask_unmask_irq,
	.unmask	= dummy_mask_unmask_irq,
};
/*
 * Template descriptor for unclaimed/out-of-range IRQs: routes to
 * do_bad_IRQ and starts disabled (depth = 1).
 */
static struct irqdesc bad_irq_desc = {
	.chip	= &bad_chip,
	.handle	= do_bad_IRQ,
	.depth	= 1,
};
  72. /**
  73. * disable_irq - disable an irq and wait for completion
  74. * @irq: Interrupt to disable
  75. *
  76. * Disable the selected interrupt line. We do this lazily.
  77. *
  78. * This function may be called from IRQ context.
  79. */
  80. void disable_irq(unsigned int irq)
  81. {
  82. struct irqdesc *desc = irq_desc + irq;
  83. unsigned long flags;
  84. spin_lock_irqsave(&irq_controller_lock, flags);
  85. if (!desc->depth++)
  86. desc->enabled = 0;
  87. spin_unlock_irqrestore(&irq_controller_lock, flags);
  88. }
  89. /**
  90. * enable_irq - enable interrupt handling on an irq
  91. * @irq: Interrupt to enable
  92. *
  93. * Re-enables the processing of interrupts on this IRQ line.
  94. * Note that this may call the interrupt handler, so you may
  95. * get unexpected results if you hold IRQs disabled.
  96. *
  97. * This function may be called from IRQ context.
  98. */
  99. void enable_irq(unsigned int irq)
  100. {
  101. struct irqdesc *desc = irq_desc + irq;
  102. unsigned long flags;
  103. int pending = 0;
  104. spin_lock_irqsave(&irq_controller_lock, flags);
  105. if (unlikely(!desc->depth)) {
  106. printk("enable_irq(%u) unbalanced from %p\n", irq,
  107. __builtin_return_address(0)); //FIXME bum addresses reported - why?
  108. } else if (!--desc->depth) {
  109. desc->probing = 0;
  110. desc->enabled = 1;
  111. desc->chip->unmask(irq);
  112. pending = desc->pending;
  113. desc->pending = 0;
  114. /*
  115. * If the interrupt was waiting to be processed,
  116. * retrigger it.
  117. */
  118. if (pending)
  119. desc->chip->rerun(irq);
  120. }
  121. spin_unlock_irqrestore(&irq_controller_lock, flags);
  122. }
/*
 * seq_file callback for /proc/interrupts: one line per claimed IRQ,
 * followed (at index NR_IRQS) by the FIQ list and the error count.
 * The iterator value v is a loff_t index supplied by the seq_file core.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v;
	struct irqaction * action;

	if (i < NR_IRQS) {
		action = irq_desc[i].action;
		if (!action)
			goto out;	/* unclaimed IRQ: print nothing */
		seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
		seq_printf(p, "  %s", action->name);
		/* Shared IRQs: append every additional action's name. */
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %s", action->name);
		}
		seq_putc(p, '\n');
	} else if (i == NR_IRQS) {
		/* Trailer after the last IRQ line. */
		show_fiq_list(p, v);
		seq_printf(p, "Err: %10lu\n", irq_err_count);
	}
out:
	return 0;
}
  144. /*
  145. * IRQ lock detection.
  146. *
  147. * Hopefully, this should get us out of a few locked situations.
  148. * However, it may take a while for this to happen, since we need
  149. * a large number if IRQs to appear in the same jiffie with the
  150. * same instruction pointer (or within 2 instructions).
  151. */
  152. static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
  153. {
  154. unsigned long instr_ptr = instruction_pointer(regs);
  155. if (desc->lck_jif == jiffies &&
  156. desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
  157. desc->lck_cnt += 1;
  158. if (desc->lck_cnt > MAX_IRQ_CNT) {
  159. printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
  160. return 1;
  161. }
  162. } else {
  163. desc->lck_cnt = 0;
  164. desc->lck_pc = instruction_pointer(regs);
  165. desc->lck_jif = jiffies;
  166. }
  167. return 0;
  168. }
/*
 * Run every action chained on this IRQ.
 *
 * Called with irq_controller_lock held; drops it around the handler
 * calls (so handlers may themselves take it) and re-acquires it before
 * returning.  Handlers without SA_INTERRUPT run with local IRQs enabled.
 *
 * NOTE(review): the exit path uses spin_lock_irq() (lock + local IRQ
 * disable) while the entry path only did spin_unlock() — presumably the
 * caller entered with IRQs already disabled; confirm against asm_do_IRQ.
 */
static void
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int ret;

	spin_unlock(&irq_controller_lock);
	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	/* OR together the flags of every action that handled the IRQ. */
	status = 0;
	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		action = action->next;
	} while (action);

	/* Feed the entropy pool only if a handler asked for it. */
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	spin_lock_irq(&irq_controller_lock);
}
  188. /*
  189. * This is for software-decoded IRQs. The caller is expected to
  190. * handle the ack, clear, mask and unmask issues.
  191. */
  192. void
  193. do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
  194. {
  195. struct irqaction *action;
  196. const int cpu = smp_processor_id();
  197. desc->triggered = 1;
  198. kstat_cpu(cpu).irqs[irq]++;
  199. action = desc->action;
  200. if (action)
  201. __do_irq(irq, desc->action, regs);
  202. }
/*
 * Flow handler for edge-triggered IRQs.
 *
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this. Hence the complexity.
 *
 * An edge that arrives while the IRQ is disabled or already being
 * handled cannot simply be ignored (it would be lost), so it is
 * recorded in desc->pending and the line masked; the pending edge is
 * replayed by the do/while loop below or by enable_irq().
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || !desc->enabled))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;

		action = desc->action;
		if (!action)
			break;

		/* A new edge arrived while we were handling the previous
		 * one: it was masked and marked pending.  Unmask before
		 * replaying it through __do_irq(). */
		if (desc->pending && desc->enabled) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running. Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
/*
 * Level-based IRQ handler. Nice and simple.
 *
 * The line is acked (which also masks it for level IRQs) up front, the
 * actions are run, and the line is unmasked again only if the IRQ is
 * still enabled and not detected as stuck.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * Acknowledge, clear _AND_ disable the interrupt.
	 */
	desc->chip->ack(irq);

	if (likely(desc->enabled)) {
		kstat_cpu(cpu).irqs[irq]++;

		/*
		 * Return with this interrupt masked if no action
		 */
		action = desc->action;
		if (action) {
			__do_irq(irq, desc->action, regs);

			/* Only re-enable the line if nothing disabled it
			 * meanwhile and the lock detector is happy. */
			if (likely(desc->enabled &&
				   !check_irq_lock(desc, irq, regs)))
				desc->chip->unmask(irq);
		}
	}
}
  281. /*
  282. * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
  283. * come via this function. Instead, they should provide their
  284. * own 'handler'
  285. */
  286. asmlinkage void asm_do_IRQ(int irq, struct pt_regs *regs)
  287. {
  288. struct irqdesc *desc = irq_desc + irq;
  289. /*
  290. * Some hardware gives randomly wrong interrupts. Rather
  291. * than crashing, do something sensible.
  292. */
  293. if (irq >= NR_IRQS)
  294. desc = &bad_irq_desc;
  295. irq_enter();
  296. spin_lock(&irq_controller_lock);
  297. desc->handle(irq, desc, regs);
  298. spin_unlock(&irq_controller_lock);
  299. irq_exit();
  300. }
/*
 * Install a flow handler for @irq.
 *
 * Passing NULL (or do_bad_IRQ) tears the IRQ down: the line is masked,
 * acked and left disabled.  @is_chained marks a cascaded IRQ: it is
 * unmasked immediately and excluded from probing/request_irq (valid=0).
 */
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	/* A chained handler needs a real chip installed first. */
	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		/* Shutting the IRQ down: quiesce and disable it. */
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->depth = 1;
		desc->enabled = 0;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		/* Chained IRQs are owned by their demux handler, not by
		 * request_irq()/probing, and run enabled from now on. */
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
  330. void set_irq_chip(unsigned int irq, struct irqchip *chip)
  331. {
  332. struct irqdesc *desc;
  333. unsigned long flags;
  334. if (irq >= NR_IRQS) {
  335. printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
  336. return;
  337. }
  338. if (chip == NULL)
  339. chip = &bad_chip;
  340. desc = irq_desc + irq;
  341. spin_lock_irqsave(&irq_controller_lock, flags);
  342. desc->chip = chip;
  343. spin_unlock_irqrestore(&irq_controller_lock, flags);
  344. }
  345. int set_irq_type(unsigned int irq, unsigned int type)
  346. {
  347. struct irqdesc *desc;
  348. unsigned long flags;
  349. int ret = -ENXIO;
  350. if (irq >= NR_IRQS) {
  351. printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
  352. return -ENODEV;
  353. }
  354. desc = irq_desc + irq;
  355. if (desc->chip->type) {
  356. spin_lock_irqsave(&irq_controller_lock, flags);
  357. ret = desc->chip->type(irq, type);
  358. spin_unlock_irqrestore(&irq_controller_lock, flags);
  359. }
  360. return ret;
  361. }
  362. void set_irq_flags(unsigned int irq, unsigned int iflags)
  363. {
  364. struct irqdesc *desc;
  365. unsigned long flags;
  366. if (irq >= NR_IRQS) {
  367. printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
  368. return;
  369. }
  370. desc = irq_desc + irq;
  371. spin_lock_irqsave(&irq_controller_lock, flags);
  372. desc->valid = (iflags & IRQF_VALID) != 0;
  373. desc->probe_ok = (iflags & IRQF_PROBE) != 0;
  374. desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
  375. spin_unlock_irqrestore(&irq_controller_lock, flags);
  376. }
/*
 * Link @new onto @irq's action chain and (on first attach) enable the
 * line.  Returns 0 on success or -EBUSY when an existing action refuses
 * sharing.  Ownership of @new passes to the chain on success; the
 * caller frees it on failure (see request_irq()).
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		/* First action on this line: reset state and, unless the
		 * IRQ was flagged IRQF_NOAUTOEN, enable it right away. */
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->depth = 1;
		if (!desc->noautoenable) {
			desc->depth = 0;
			desc->enabled = 1;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}
  433. /**
  434. * request_irq - allocate an interrupt line
  435. * @irq: Interrupt line to allocate
  436. * @handler: Function to be called when the IRQ occurs
  437. * @irqflags: Interrupt type flags
  438. * @devname: An ascii name for the claiming device
  439. * @dev_id: A cookie passed back to the handler function
  440. *
  441. * This call allocates interrupt resources and enables the
  442. * interrupt line and IRQ handling. From the point this
  443. * call is made your handler function may be invoked. Since
  444. * your handler function must clear any interrupt the board
  445. * raises, you must take care both to initialise your hardware
  446. * and to set up the interrupt handler in the right order.
  447. *
  448. * Dev_id must be globally unique. Normally the address of the
  449. * device data structure is used as the cookie. Since the handler
  450. * receives this value it makes sense to use it.
  451. *
  452. * If your interrupt is shared you must pass a non NULL dev_id
  453. * as this is required when freeing the interrupt.
  454. *
  455. * Flags:
  456. *
  457. * SA_SHIRQ Interrupt is shared
  458. *
  459. * SA_INTERRUPT Disable local interrupts while processing
  460. *
  461. * SA_SAMPLE_RANDOM The interrupt can be used for entropy
  462. *
  463. */
  464. //FIXME - handler used to return void - whats the significance of the change?
  465. int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
  466. unsigned long irq_flags, const char * devname, void *dev_id)
  467. {
  468. unsigned long retval;
  469. struct irqaction *action;
  470. if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
  471. (irq_flags & SA_SHIRQ && !dev_id))
  472. return -EINVAL;
  473. action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
  474. if (!action)
  475. return -ENOMEM;
  476. action->handler = handler;
  477. action->flags = irq_flags;
  478. cpus_clear(action->mask);
  479. action->name = devname;
  480. action->next = NULL;
  481. action->dev_id = dev_id;
  482. retval = setup_irq(irq, action);
  483. if (retval)
  484. kfree(action);
  485. return retval;
  486. }
  487. EXPORT_SYMBOL(request_irq);
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function.
 *
 *	This function may be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n",irq);
#ifdef CONFIG_DEBUG_ERRORS
		__backtrace();
#endif
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	/* Walk the action chain looking for the entry with this dev_id. */
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		kfree(action);
		goto out;
	}
	/* Fell off the chain: nothing matched dev_id. */
	printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
#ifdef CONFIG_DEBUG_ERRORS
	__backtrace();
#endif
out:
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

EXPORT_SYMBOL(free_irq);
/* Start the interrupt probing. Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing is
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		/* Put the line into probe trigger mode if supported. */
		if (irq_desc[i].chip->type)
			irq_desc[i].chip->type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 * (busy-wait: min 100ms delay)
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts: anything
	 * that fired during the settle period was not caused by the
	 * device being probed.
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	/* Number of IRQs still armed for probing. */
	return irqs;
}

EXPORT_SYMBOL(probe_irq_on);
/*
 * End a probe started by probe_irq_on().
 *
 * Possible return values:
 *   >= 0 - interrupt number
 *     -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one
	 * that we were probing has been triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			/* More than one triggered: result is ambiguous. */
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	/* NOTE(review): appears redundant if NO_IRQ == -1, since irq_found
	 * starts at NO_IRQ — confirm NO_IRQ's definition for this arch. */
	if (irq_found == -1)
		irq_found = NO_IRQ;
out:
	spin_unlock_irq(&irq_controller_lock);

	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
/* No /proc/irq hierarchy on this architecture: intentionally empty. */
void __init init_irq_proc(void)
{
}
  607. void __init init_IRQ(void)
  608. {
  609. struct irqdesc *desc;
  610. extern void init_dma(void);
  611. int irq;
  612. for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++)
  613. *desc = bad_irq_desc;
  614. arc_init_irq();
  615. init_dma();
  616. }