/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

        if (!desc)
                return -EINVAL;

        if (!chip)
                chip = &no_irq_chip;

        irq_chip_set_defaults(chip);
        desc->irq_data.chip = chip;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL(irq_set_chip);
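
/*
 * Illustrative usage sketch (not part of the original file): an
 * architecture's interrupt setup code typically associates each irq
 * number with its controller's irq_chip before installing a flow
 * handler. The "FOO_*" and "foo_irq_chip" names below are hypothetical.
 *
 *        for (irq = FOO_IRQ_BASE; irq < FOO_IRQ_BASE + FOO_NR_IRQS; irq++)
 *                irq_set_chip(irq, &foo_irq_chip);
 */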

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
        int ret = 0;

        if (!desc)
                return -EINVAL;

        type &= IRQ_TYPE_SENSE_MASK;
        if (type != IRQ_TYPE_NONE)
                ret = __irq_set_trigger(desc, irq, type);
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
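
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * that knows its device signals on the falling edge can request that
 * trigger type before requesting the interrupt. The "gpio_irq" variable
 * is hypothetical; IRQ_TYPE_EDGE_FALLING comes from include/linux/irq.h.
 *
 *        ret = irq_set_irq_type(gpio_irq, IRQ_TYPE_EDGE_FALLING);
 *        if (ret)
 *                pr_err("could not set trigger type\n");
 */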

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the per-irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

        if (!desc)
                return -EINVAL;
        desc->irq_data.handler_data = data;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

        if (!desc)
                return -EINVAL;
        desc->irq_data.msi_desc = entry;
        if (entry)
                entry->irq = irq;
        irq_put_desc_unlock(desc, flags);
        return 0;
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

        if (!desc)
                return -EINVAL;
        desc->irq_data.chip_data = data;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
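
/*
 * Illustrative usage sketch (not part of the original file): a GPIO
 * expander driver acting as a secondary interrupt controller might
 * attach its per-chip state so its irq_chip callbacks and its parent's
 * flow handler can retrieve it later. All "foo_*" names are hypothetical.
 *
 *        irq_set_chip_data(irq, foo_chip);
 *        irq_set_handler_data(parent_irq, foo_chip);
 *
 * The callbacks would then recover the pointer with irq_get_chip_data()
 * or irq_get_handler_data().
 */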

struct irq_data *irq_get_irq_data(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
        desc->istate &= ~IRQS_DISABLED;
        irq_compat_clr_disabled(desc);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
        desc->istate |= IRQS_DISABLED;
        irq_compat_set_disabled(desc);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
        desc->istate &= ~IRQS_MASKED;
        irq_compat_clr_masked(desc);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
        desc->istate |= IRQS_MASKED;
        irq_compat_set_masked(desc);
}

int irq_startup(struct irq_desc *desc)
{
        irq_state_clr_disabled(desc);
        desc->depth = 0;

        if (desc->irq_data.chip->irq_startup) {
                int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
                irq_state_clr_masked(desc);
                return ret;
        }

        irq_enable(desc);
        return 0;
}

void irq_shutdown(struct irq_desc *desc)
{
        irq_state_set_disabled(desc);
        desc->depth = 1;
        if (desc->irq_data.chip->irq_shutdown)
                desc->irq_data.chip->irq_shutdown(&desc->irq_data);
        if (desc->irq_data.chip->irq_disable)
                desc->irq_data.chip->irq_disable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_mask(&desc->irq_data);
        irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
        irq_state_clr_disabled(desc);
        if (desc->irq_data.chip->irq_enable)
                desc->irq_data.chip->irq_enable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        irq_state_clr_masked(desc);
}

void irq_disable(struct irq_desc *desc)
{
        irq_state_set_disabled(desc);
        if (desc->irq_data.chip->irq_disable) {
                desc->irq_data.chip->irq_disable(&desc->irq_data);
                irq_state_set_masked(desc);
        }
}

#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
/* Temporary migration helpers */
static void compat_irq_mask(struct irq_data *data)
{
        data->chip->mask(data->irq);
}

static void compat_irq_unmask(struct irq_data *data)
{
        data->chip->unmask(data->irq);
}

static void compat_irq_ack(struct irq_data *data)
{
        data->chip->ack(data->irq);
}

static void compat_irq_mask_ack(struct irq_data *data)
{
        data->chip->mask_ack(data->irq);
}

static void compat_irq_eoi(struct irq_data *data)
{
        data->chip->eoi(data->irq);
}

static void compat_irq_enable(struct irq_data *data)
{
        data->chip->enable(data->irq);
}

static void compat_irq_disable(struct irq_data *data)
{
        data->chip->disable(data->irq);
}

static void compat_irq_shutdown(struct irq_data *data)
{
        data->chip->shutdown(data->irq);
}

static unsigned int compat_irq_startup(struct irq_data *data)
{
        return data->chip->startup(data->irq);
}

static int compat_irq_set_affinity(struct irq_data *data,
                                   const struct cpumask *dest, bool force)
{
        return data->chip->set_affinity(data->irq, dest);
}

static int compat_irq_set_type(struct irq_data *data, unsigned int type)
{
        return data->chip->set_type(data->irq, type);
}

static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
{
        return data->chip->set_wake(data->irq, on);
}

static int compat_irq_retrigger(struct irq_data *data)
{
        return data->chip->retrigger(data->irq);
}

static void compat_bus_lock(struct irq_data *data)
{
        data->chip->bus_lock(data->irq);
}

static void compat_bus_sync_unlock(struct irq_data *data)
{
        data->chip->bus_sync_unlock(data->irq);
}
#endif

/*
 * Fixup enable/disable function pointers
 */
void irq_chip_set_defaults(struct irq_chip *chip)
{
#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
        if (chip->enable)
                chip->irq_enable = compat_irq_enable;
        if (chip->disable)
                chip->irq_disable = compat_irq_disable;
        if (chip->shutdown)
                chip->irq_shutdown = compat_irq_shutdown;
        if (chip->startup)
                chip->irq_startup = compat_irq_startup;
        if (!chip->end)
                chip->end = dummy_irq_chip.end;
        if (chip->bus_lock)
                chip->irq_bus_lock = compat_bus_lock;
        if (chip->bus_sync_unlock)
                chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
        if (chip->mask)
                chip->irq_mask = compat_irq_mask;
        if (chip->unmask)
                chip->irq_unmask = compat_irq_unmask;
        if (chip->ack)
                chip->irq_ack = compat_irq_ack;
        if (chip->mask_ack)
                chip->irq_mask_ack = compat_irq_mask_ack;
        if (chip->eoi)
                chip->irq_eoi = compat_irq_eoi;
        if (chip->set_affinity)
                chip->irq_set_affinity = compat_irq_set_affinity;
        if (chip->set_type)
                chip->irq_set_type = compat_irq_set_type;
        if (chip->set_wake)
                chip->irq_set_wake = compat_irq_set_wake;
        if (chip->retrigger)
                chip->irq_retrigger = compat_irq_retrigger;
#endif
}
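
/*
 * Illustrative sketch (not part of the original file): an irq_chip that
 * still provides only the deprecated irq-number based callbacks is
 * transparently wrapped by the compat_* helpers above when it is passed
 * to irq_set_chip(). The "foo" names below are hypothetical.
 *
 *        static struct irq_chip foo_legacy_chip = {
 *                .name   = "foo",
 *                .mask   = foo_mask,
 *                .unmask = foo_unmask,
 *                .ack    = foo_ack,
 *        };
 *
 * After irq_chip_set_defaults(), foo_legacy_chip.irq_mask points to
 * compat_irq_mask(), which forwards to foo_mask(irq).
 */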

static inline void mask_ack_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask_ack)
                desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
        else {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                if (desc->irq_data.chip->irq_ack)
                        desc->irq_data.chip->irq_ack(&desc->irq_data);
        }
        irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask) {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                irq_state_set_masked(desc);
        }
}

void unmask_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_unmask) {
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
                irq_state_clr_masked(desc);
        }
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        irqreturn_t action_ret;

        might_sleep();

        raw_spin_lock_irq(&desc->lock);

        kstat_incr_irqs_this_cpu(irq, desc);

        action = desc->action;
        if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
                goto out_unlock;

        irq_compat_set_progress(desc);
        desc->istate |= IRQS_INPROGRESS;
        raw_spin_unlock_irq(&desc->lock);

        action_ret = action->thread_fn(action->irq, action->dev_id);
        if (!noirqdebug)
                note_interrupt(irq, desc, action_ret);

        raw_spin_lock_irq(&desc->lock);
        desc->istate &= ~IRQS_INPROGRESS;
        irq_compat_clr_progress(desc);

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
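
/*
 * Illustrative usage sketch (not part of the original file): an I2C GPIO
 * expander whose own interrupt line is serviced by a threaded handler
 * can dispatch its sub-interrupts from that thread. The "foo_*" names
 * are hypothetical; the child irqs are assumed to have been set up with
 * irq_set_nested_thread(irq, 1).
 *
 *        static irqreturn_t foo_irq_thread(int irq, void *dev_id)
 *        {
 *                struct foo_chip *foo = dev_id;
 *                unsigned long pending = foo_read_pending(foo);
 *                int bit;
 *
 *                for_each_set_bit(bit, &pending, foo->nr_irqs)
 *                        handle_nested_irq(foo->irq_base + bit);
 *                return IRQ_HANDLED;
 *        }
 */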

static bool irq_check_poll(struct irq_desc *desc)
{
        if (!(desc->istate & IRQS_POLL_INPROGRESS))
                return false;
        return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        if (unlikely(desc->istate & IRQS_INPROGRESS))
                if (!irq_check_poll(desc))
                        goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
                goto out_unlock;

        handle_irq_event(desc);

out_unlock:
        raw_spin_unlock(&desc->lock);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);

        if (unlikely(desc->istate & IRQS_INPROGRESS))
                if (!irq_check_poll(desc))
                        goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If it's disabled or no action is available,
         * keep it masked and get out of here
         */
        if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
                goto out_unlock;

        handle_irq_event(desc);

        if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
                unmask_irq(desc);
out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
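
/*
 * Illustrative usage sketch (not part of the original file): an
 * interrupt controller driver wires an active-level interrupt line to
 * this flow handler at setup time. The "foo_irq_chip" name is
 * hypothetical.
 *
 *        irq_set_chip_and_handler_name(irq, &foo_irq_chip,
 *                                      handle_level_irq, "level");
 *
 * The mask_ack on entry and the conditional unmask on exit are then
 * done here, so the device driver's handler only has to quiesce the
 * device itself.
 */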

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
        if (desc->preflow_handler)
                desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        if (unlikely(desc->istate & IRQS_INPROGRESS))
                if (!irq_check_poll(desc))
                        goto out;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        kstat_incr_irqs_this_cpu(irq, desc);

        /*
         * If it's disabled or no action is available,
         * then mask it and get out of here:
         */
        if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
                irq_compat_set_pending(desc);
                desc->istate |= IRQS_PENDING;
                mask_irq(desc);
                goto out;
        }

        if (desc->istate & IRQS_ONESHOT)
                mask_irq(desc);

        preflow_handler(desc);
        handle_irq_event(desc);

out_eoi:
        desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
        raw_spin_unlock(&desc->lock);
        return;
out:
        if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
                goto out_eoi;
        goto out_unlock;
}
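
/*
 * Illustrative usage sketch (not part of the original file): controllers
 * that only need an end-of-interrupt notification, i.e. chips providing
 * an ->irq_eoi() callback, are typically hooked up like this
 * ("foo_eoi_chip" is hypothetical):
 *
 *        irq_set_chip_and_handler_name(irq, &foo_eoi_chip,
 *                                      handle_fasteoi_irq, "fasteoi");
 */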

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
        /*
         * If we're currently running this IRQ, or it's disabled,
         * we shouldn't process the IRQ. Mark it pending, handle
         * the necessary masking and go out
         */
        if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
                      !desc->action))) {
                if (!irq_check_poll(desc)) {
                        irq_compat_set_pending(desc);
                        desc->istate |= IRQS_PENDING;
                        mask_ack_irq(desc);
                        goto out_unlock;
                }
        }
        kstat_incr_irqs_this_cpu(irq, desc);

        /* Start handling the irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);

        do {
                if (unlikely(!desc->action)) {
                        mask_irq(desc);
                        goto out_unlock;
                }

                /*
                 * When another irq arrived while we were handling
                 * one, we could have masked the irq.
                 * Re-enable it, if it was not disabled in the meantime.
                 */
                if (unlikely(desc->istate & IRQS_PENDING)) {
                        if (!(desc->istate & IRQS_DISABLED) &&
                            (desc->istate & IRQS_MASKED))
                                unmask_irq(desc);
                }

                handle_irq_event(desc);

        } while ((desc->istate & IRQS_PENDING) &&
                 !(desc->istate & IRQS_DISABLED));

out_unlock:
        raw_spin_unlock(&desc->lock);
}
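
/*
 * Illustrative usage sketch (not part of the original file): a chip
 * whose interrupts are latched per edge and acknowledged via ->irq_ack()
 * would install this flow handler ("foo_edge_chip" is hypothetical):
 *
 *        irq_set_chip_and_handler_name(irq, &foo_edge_chip,
 *                                      handle_edge_irq, "edge");
 *
 * Edges that arrive while the handler loop is running are latched as
 * IRQS_PENDING and replayed by the do/while loop above instead of
 * being lost.
 */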

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        kstat_incr_irqs_this_cpu(irq, desc);

        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        handle_irq_event_percpu(desc, desc->action);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}
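
/*
 * Illustrative usage sketch (not part of the original file): interrupts
 * that are private to each CPU, such as a local timer, avoid the
 * descriptor lock entirely by using this handler ("foo_percpu_chip" is
 * hypothetical):
 *
 *        irq_set_chip_and_handler_name(irq, &foo_percpu_chip,
 *                                      handle_percpu_irq, "percpu");
 */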

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

        if (!desc)
                return;

        if (!handle) {
                handle = handle_bad_irq;
        } else {
                if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
                        goto out;
        }

        /* Uninstall? */
        if (handle == handle_bad_irq) {
                if (desc->irq_data.chip != &no_irq_chip)
                        mask_ack_irq(desc);
                irq_compat_set_disabled(desc);
                desc->istate |= IRQS_DISABLED;
                desc->depth = 1;
        }
        desc->handle_irq = handle;
        desc->name = name;

        if (handle != handle_bad_irq && is_chained) {
                irq_settings_set_noprobe(desc);
                irq_settings_set_norequest(desc);
                irq_startup(desc);
        }
out:
        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
                              irq_flow_handler_t handle, const char *name)
{
        irq_set_chip(irq, chip);
        __irq_set_handler(irq, handle, 0, name);
}
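
/*
 * Illustrative usage sketch (not part of the original file): a chained
 * demultiplexer installs its own flow handler on the parent line with
 * is_chained set, which marks the irq noprobe/norequest and starts it
 * immediately. The "foo_*" names are hypothetical; callers normally go
 * through the irq_set_chained_handler() wrapper from <linux/irq.h>
 * rather than calling __irq_set_handler() directly.
 *
 *        irq_set_chip(parent_irq, &foo_irq_chip);
 *        irq_set_handler_data(parent_irq, foo);
 *        irq_set_chained_handler(parent_irq, foo_demux_handler);
 */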

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

        if (!desc)
                return;

        irq_settings_clr_and_set(desc, clr, set);

        irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
                   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
        if (irq_settings_has_no_balance_set(desc))
                irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
        if (irq_settings_is_per_cpu(desc))
                irqd_set(&desc->irq_data, IRQD_PER_CPU);
        if (irq_settings_can_move_pcntxt(desc))
                irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);

        irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

        irq_put_desc_unlock(desc, flags);
}
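
/*
 * Illustrative usage sketch (not part of the original file): per-CPU
 * interrupt lines are commonly flagged so that they are never balanced
 * across CPUs, never probed and not enabled automatically:
 *
 *        irq_modify_status(irq, IRQ_NOREQUEST,
 *                          IRQ_PER_CPU | IRQ_NOPROBE | IRQ_NOAUTOEN);
 *
 * The IRQ_* flag values come from include/linux/irq.h; the cleared and
 * set masks are applied under the descriptor lock above.
 */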