/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int status;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
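
/*
 * Usage sketch (illustrative only, not compiled): a hypothetical "foo"
 * driver tearing down state shared with its handler. The names foo_dev,
 * foo_quiesce_hw and the irq/dma_ring fields are assumptions, not part
 * of this file. The point is the ordering: stop the device raising the
 * interrupt first, let synchronize_irq() wait out handlers already
 * running on other CPUs, and only then free the shared state.
 */
#if 0
static void foo_teardown(struct foo_dev *dev)
{
	foo_quiesce_hw(dev);		/* device no longer raises the irq */
	synchronize_irq(dev->irq);	/* wait for in-flight handlers */
	kfree(dev->dma_ring);		/* safe: no handler can touch it now */
}
#endif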

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
	    !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
{
	return desc->status & IRQ_MOVE_PCNTXT;
}
static inline bool irq_move_pending(struct irq_desc *desc)
{
	return desc->status & IRQ_MOVE_PENDING;
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @mask: cpumask to apply
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = desc->irq_data.chip;
	unsigned long flags;
	int ret = 0;

	if (!chip->irq_set_affinity)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	if (irq_can_move_pcntxt(desc)) {
		ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(desc->irq_data.affinity, mask);
			/* fall through */
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
			ret = 0;
		}
	} else {
		desc->status |= IRQ_MOVE_PENDING;
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	desc->status |= IRQ_AFFINITY_SET;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->affinity_hint = m;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
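
/*
 * Usage sketch (illustrative only, not compiled): spreading per-queue
 * interrupts with affinity hints; foo_dev, nr_queues and queue_irq are
 * hypothetical. Note that only the pointer is stored, so the mask must
 * stay valid, and the hint must be cleared (set to NULL) before
 * free_irq(), which warns if a stale hint is left behind.
 */
#if 0
static void foo_set_queue_hints(struct foo_dev *dev)
{
	int i;

	for (i = 0; i < dev->nr_queues; i++)
		irq_set_affinity_hint(dev->queue_irq[i],
				      cpumask_of(i % num_online_cpus()));
}
#endif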

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(desc))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *	    notification. Function pointers must be initialised;
 *	    the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
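
/*
 * Usage sketch (illustrative only, not compiled): a hypothetical "foo"
 * driver embeds the irq_affinity_notify block in its device structure,
 * fills in the two callbacks and registers it. The release callback is
 * empty here because nothing was allocated dynamically; a driver that
 * kmalloc'd the block would free it there. Unregister with a NULL
 * notify before free_irq(), as the kernel-doc above requires.
 */
#if 0
static void foo_affinity_notify(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	struct foo_dev *dev = container_of(notify, struct foo_dev, notify);

	foo_retarget_queues(dev, mask);	/* hypothetical re-steering */
}

static void foo_affinity_release(struct kref *ref)
{
	/* notify block is embedded in foo_dev: nothing to free */
}

static int foo_register_notifier(struct foo_dev *dev)
{
	dev->notify.notify = foo_affinity_notify;
	dev->notify.release = foo_affinity_release;
	return irq_set_affinity_notifier(dev->irq, &dev->notify);
}
#endif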

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct irq_chip *chip = get_irq_desc_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(mask, cpu_online_mask, set);
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	if (!ret)
		irq_set_thread_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->status |= IRQ_SUSPENDED;
	}

	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	}
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__disable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
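
/*
 * Usage sketch (illustrative only, not compiled): the classic
 * disable_irq()/enable_irq() bracket around a reconfiguration that the
 * handler must not observe half-done; foo_dev and foo_rewrite_registers
 * are hypothetical. disable_irq() returns only after handlers already
 * in flight have completed, so the critical section is truly quiet.
 */
#if 0
static void foo_reprogram(struct foo_dev *dev)
{
	disable_irq(dev->irq);		/* waits for running handlers */
	foo_rewrite_registers(dev);	/* handler cannot run here */
	enable_irq(dev->irq);		/* matches the disable above */
}
#endif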

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->status & IRQ_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
			/* Pretend that it got disabled ! */
			desc->depth++;
		}
		desc->status &= ~IRQ_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		if (desc->status & IRQ_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable,
	    KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		return;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = 0;

	/*
	 * wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
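
/*
 * Usage sketch (illustrative only, not compiled): balanced wake
 * enable/disable from a driver's suspend/resume callbacks; foo_dev is
 * hypothetical. The calls must match one-for-one, exactly as the
 * kernel-doc above demands, which is why both legs test the same
 * device_may_wakeup() condition.
 */
#if 0
static int foo_suspend(struct device *d)
{
	struct foo_dev *dev = dev_get_drvdata(d);

	if (device_may_wakeup(d))
		irq_set_irq_wake(dev->irq, 1);
	return 0;
}

static int foo_resume(struct device *d)
{
	struct foo_dev *dev = dev_get_drvdata(d);

	if (device_may_wakeup(d))
		irq_set_irq_wake(dev->irq, 0);
	return 0;
}
#endif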

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	int ret;
	struct irq_chip *chip = desc->irq_data.chip;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	if (ret)
		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	else {
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;

		if (chip != desc->irq_data.chip)
			irq_chip_set_defaults(desc->irq_data.chip);
	}

	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect against
	 * the following scenario:
	 *
	 * The thread finishes before the hard interrupt handler on
	 * another CPU does. If we unmasked the irq line now, the
	 * interrupt could come in again, mask the line and then leave
	 * due to IRQ_INPROGRESS, and the line would stay masked forever.
	 */
	if (unlikely(desc->status & IRQ_INPROGRESS)) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
		desc->status &= ~IRQ_MASKED;
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	}
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	int wake, oneshot = desc->status & IRQ_ONESHOT;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		irq_thread_check_affinity(desc, action);

		atomic_inc(&desc->threads_active);

		raw_spin_lock_irq(&desc->lock);
		if (unlikely(desc->status & IRQ_DISABLED)) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQ_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			desc->status |= IRQ_PENDING;
			raw_spin_unlock_irq(&desc->lock);
		} else {
			raw_spin_unlock_irq(&desc->lock);

			action->thread_fn(action->irq, action->dev_id);

			if (oneshot)
				irq_finalize_oneshot(action->irq, desc);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would make a
	 * fuss about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is loaded without actually installing a new
		 * handler, but that is not really a problem, since only
		 * the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/* Oneshot interrupts are not allowed with shared */
	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
		return -EINVAL;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = desc->status & IRQ_NESTED_THREAD;
	if (nested) {
		if (!new->thread_fn)
			return -EINVAL;
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t))
			return PTR_ERR(t);
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	if (!shared) {
		irq_chip_set_defaults(desc->irq_data.chip);

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		} else
			compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
		if (new->flags & IRQF_PERCPU)
			desc->status |= IRQ_PER_CPU;
#endif

		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

		if (new->flags & IRQF_ONESHOT)
			desc->status |= IRQ_ONESHOT;

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			desc->irq_data.chip->irq_startup(&desc->irq_data);
		} else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING)
			desc->status |= IRQ_NO_BALANCING;

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if ((new->flags & IRQF_TRIGGER_MASK)
			&& (new->flags & IRQF_TRIGGER_MASK)
				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* hope the handler works with the actual trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
			   irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
			   (int)(new->flags & IRQF_TRIGGER_MASK));
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_mask:
	/*
	 * The lock is held on every path that reaches out_mask; drop it
	 * before the out_thread code, which is also entered from the
	 * allocation-failure path above, where the lock was never taken.
	 */
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->irq_data.chip->release)
		desc->irq_data.chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		desc->status |= IRQ_DISABLED;
		if (desc->irq_data.chip->irq_shutdown)
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
		else
			desc->irq_data.chip->irq_disable(&desc->irq_data);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now that it's being freed, so let's make
	 * sure that is so by doing an extra call to the handler ....
	 *
	 * (We do this after actually deregistering it, to make sure that
	 * a 'real' IRQ doesn't run in parallel with our fake.)
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
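
/*
 * Usage sketch (illustrative only, not compiled): teardown order for a
 * shared line, per the kernel-doc above; foo_dev and foo_mask_device_irq
 * are hypothetical. The device is silenced first, because on a shared
 * IRQ the other handlers keep running and the line stays enabled.
 */
#if 0
static void foo_remove(struct foo_dev *dev)
{
	foo_mask_device_irq(dev);	/* our source must be quiet first */
	free_irq(dev->irq, dev);	/* waits for handlers, then releases */
}
#endif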

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NOREQUEST)
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
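
/*
 * Usage sketch (illustrative only, not compiled) of the split-handler
 * scheme described above; the foo_* names are hypothetical. The primary
 * handler runs in hard interrupt context, checks whether the (shared)
 * line was raised by this device, quiets it and defers the sleepable
 * work to the irq thread by returning IRQ_WAKE_THREAD.
 */
#if 0
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_dev *dev = dev_id;

	if (!foo_irq_is_ours(dev))	/* shared line: check the source */
		return IRQ_NONE;
	foo_mask_device_irq(dev);	/* quiet the device ... */
	return IRQ_WAKE_THREAD;		/* ... and wake the irq thread */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_dev *dev = dev_id;

	foo_process_events(dev);	/* may sleep: runs in the thread */
	foo_unmask_device_irq(dev);
	return IRQ_HANDLED;
}

static int foo_probe(struct foo_dev *dev)
{
	return request_threaded_irq(dev->irq, foo_hardirq, foo_thread_fn,
				    IRQF_SHARED, "foo", dev);
}
#endif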

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NESTED_THREAD) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
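
/*
 * Usage sketch (illustrative only, not compiled): the positive return
 * values are success codes, so callers test for negative errors only;
 * foo_dev and foo_handler are hypothetical.
 */
#if 0
static int foo_probe(struct foo_dev *dev)
{
	int ret;

	ret = request_any_context_irq(dev->irq, foo_handler, 0, "foo", dev);
	if (ret < 0)
		return ret;
	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED: both mean success */
	dev->handler_is_nested = (ret == IRQC_IS_NESTED);
	return 0;
}
#endif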