/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
        force_irqthreads = true;
        return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        bool inprogress;

        if (!desc)
                return;

        do {
                unsigned long flags;

                /*
                 * Wait until we're out of the critical section. This might
                 * give the wrong answer due to the lack of memory barriers.
                 */
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();

                /* Ok, that indicated we're done: double-check carefully. */
                raw_spin_lock_irqsave(&desc->lock, flags);
                inprogress = irqd_irq_inprogress(&desc->irq_data);
                raw_spin_unlock_irqrestore(&desc->lock, flags);

                /* Oops, that failed? */
        } while (inprogress);

        /*
         * We made sure that no hardirq handler is running. Now verify
         * that no threaded handlers are active.
         */
        wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
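
/*
 * Example (hypothetical driver code, not part of this file): quiescing a
 * device before tearing down state that its handler touches. The "foo"
 * names are assumptions for illustration only.
 *
 *      static void foo_quiesce(struct foo_dev *foo)
 *      {
 *              disable_irq_nosync(foo->irq);   // stop new invocations
 *              synchronize_irq(foo->irq);      // wait out in-flight handlers
 *              // foo->ring can now be freed; no handler still touches it
 *      }
 *
 * This is the same pairing disable_irq() performs internally. The caller
 * must not hold any lock the handler takes, or synchronize_irq() can
 * deadlock as documented above.
 */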

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !irqd_can_balance(&desc->irq_data) ||
            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
                return 0;

        return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
        struct irqaction *action = desc->action;

        while (action) {
                if (action->thread)
                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
                action = action->next;
        }
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
        return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
        return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
        cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
        cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        struct irq_desc *desc = irq_data_to_desc(data);
        int ret = 0;

        if (!chip || !chip->irq_set_affinity)
                return -EINVAL;

        if (irq_can_move_pcntxt(data)) {
                ret = chip->irq_set_affinity(data, mask, false);
                switch (ret) {
                case IRQ_SET_MASK_OK:
                        cpumask_copy(data->affinity, mask);
                        /* fall through */
                case IRQ_SET_MASK_OK_NOCOPY:
                        irq_set_thread_affinity(desc);
                        ret = 0;
                }
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
        }

        if (desc->affinity_notify) {
                kref_get(&desc->affinity_notify->kref);
                schedule_work(&desc->affinity_notify->work);
        }
        irqd_set(data, IRQD_AFFINITY_SET);

        return ret;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @mask: cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        if (!desc)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
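
/*
 * Example (hypothetical, not part of this file): steering a device's
 * interrupt to CPU 0 from process context. "foo->irq" is an assumption
 * for illustration only.
 *
 *      if (irq_can_set_affinity(foo->irq))
 *              irq_set_affinity(foo->irq, cpumask_of(0));
 *
 * If the irq cannot be retargeted in process context, the request is
 * recorded as pending and applied from the next hard interrupt.
 */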

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

        if (!desc)
                return -EINVAL;
        desc->affinity_hint = m;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
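
/*
 * Example (hypothetical): publishing an affinity hint so userspace tools
 * such as irqbalance can see where the driver would like its vector to
 * run. The hint must be cleared again before the irq is freed, matching
 * the WARN_ON_ONCE() check in __free_irq():
 *
 *      irq_set_affinity_hint(irq, cpumask_of(cpu));    // after request_irq()
 *      ...
 *      irq_set_affinity_hint(irq, NULL);               // before free_irq()
 */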

static void irq_affinity_notify(struct work_struct *work)
{
        struct irq_affinity_notify *notify =
                container_of(work, struct irq_affinity_notify, work);
        struct irq_desc *desc = irq_to_desc(notify->irq);
        cpumask_var_t cpumask;
        unsigned long flags;

        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
                goto out;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (irq_move_pending(&desc->irq_data))
                irq_get_pending(cpumask, desc);
        else
                cpumask_copy(cpumask, desc->irq_data.affinity);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        notify->notify(notify, cpumask);

        free_cpumask_var(cpumask);
out:
        kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *          notification. Function pointers must be initialised;
 *          the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_affinity_notify *old_notify;
        unsigned long flags;

        /* The release function is promised process context */
        might_sleep();

        if (!desc)
                return -EINVAL;

        /* Complete initialisation of *notify */
        if (notify) {
                notify->irq = irq;
                kref_init(&notify->kref);
                INIT_WORK(&notify->work, irq_affinity_notify);
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        old_notify = desc->affinity_notify;
        desc->affinity_notify = notify;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        if (old_notify)
                kref_put(&old_notify->kref, old_notify->release);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
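
/*
 * Example (hypothetical driver code): registering for affinity change
 * notifications. The embedded irq_affinity_notify must outlive the
 * registration; release() runs from the workqueue when the last
 * reference is dropped. All "foo" names are illustrative.
 *
 *      static void foo_affinity_changed(struct irq_affinity_notify *notify,
 *                                       const cpumask_t *mask)
 *      {
 *              // re-point per-CPU resources at the new target CPUs
 *      }
 *
 *      static void foo_notify_release(struct kref *ref)
 *      {
 *              // last reference gone; registration object may be reused
 *      }
 *
 *      foo->notify.notify  = foo_affinity_changed;
 *      foo->notify.release = foo_notify_release;
 *      irq_set_affinity_notifier(foo->irq, &foo->notify);
 *      ...
 *      irq_set_affinity_notifier(foo->irq, NULL);      // before free_irq()
 */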

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct cpumask *set = irq_default_affinity;
        int ret;

        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
                return 0;

        /*
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
                if (cpumask_intersects(desc->irq_data.affinity,
                                       cpu_online_mask))
                        set = desc->irq_data.affinity;
                else
                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
        }

        cpumask_and(mask, cpu_online_mask, set);
        ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
        switch (ret) {
        case IRQ_SET_MASK_OK:
                cpumask_copy(desc->irq_data.affinity, mask);
                /* fall through */
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_set_thread_affinity(desc);
        }
        return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
        return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = setup_affinity(irq, desc, mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
        if (suspend) {
                if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
                        return;
                desc->istate |= IRQS_SUSPENDED;
        }

        if (!desc->depth++)
                irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

        if (!desc)
                return -EINVAL;
        __disable_irq(desc, irq, false);
        irq_put_desc_busunlock(desc, flags);
        return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        __disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        if (!__disable_irq_nosync(irq))
                synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
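
/*
 * Example (hypothetical): bracketing a reconfiguration of device state
 * that the handler also touches. Disable/enable calls nest, so this
 * pairing is safe even if other code on the same irq does the same.
 *
 *      disable_irq(foo->irq);          // waits for running handlers
 *      foo_rewrite_dma_rings(foo);     // handler cannot observe this
 *      enable_irq(foo->irq);
 */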

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
        if (resume) {
                if (!(desc->istate & IRQS_SUSPENDED)) {
                        if (!desc->action)
                                return;
                        if (!(desc->action->flags & IRQF_FORCE_RESUME))
                                return;
                        /* Pretend that it got disabled ! */
                        desc->depth++;
                }
                desc->istate &= ~IRQS_SUSPENDED;
        }

        switch (desc->depth) {
        case 0:
 err_out:
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
                if (desc->istate & IRQS_SUSPENDED)
                        goto err_out;
                /* Prevent probing on this irq: */
                irq_settings_set_noprobe(desc);
                irq_enable(desc);
                check_irq_resend(desc, irq);
                /* fall-through */
        }
        default:
                desc->depth--;
        }
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

        if (!desc)
                return;
        if (WARN(!desc->irq_data.chip,
                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
                goto out;

        __enable_irq(desc, irq, false);
out:
        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;

        if (desc->irq_data.chip->irq_set_wake)
                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

        return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
        int ret = 0;

        if (!desc)
                return -EINVAL;

        /*
         * Wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        } else {
                if (desc->wake_depth == 0) {
                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
                } else if (--desc->wake_depth == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 1;
                        else
                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        }
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
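
/*
 * Example (hypothetical): a wakeup-capable device arming its interrupt
 * in the driver's suspend callback and disarming it on resume. The
 * "foo" names are illustrative only.
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              if (device_may_wakeup(dev))
 *                      irq_set_irq_wake(foo->irq, 1);
 *              return 0;
 *      }
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              if (device_may_wakeup(dev))
 *                      irq_set_irq_wake(foo->irq, 0);  // calls must pair
 *              return 0;
 *      }
 */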

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
        int canrequest = 0;

        if (!desc)
                return 0;

        if (irq_settings_can_request(desc)) {
                if (!desc->action ||
                    irqflags & desc->action->flags & IRQF_SHARED)
                        canrequest = 1;
        }
        irq_put_desc_unlock(desc, flags);
        return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                      unsigned long flags)
{
        struct irq_chip *chip = desc->irq_data.chip;
        int ret, unmask = 0;

        if (!chip || !chip->irq_set_type) {
                /*
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
                pr_debug("No set_type function for IRQ %d (%s)\n", irq,
                         chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }

        flags &= IRQ_TYPE_SENSE_MASK;

        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
                if (!irqd_irq_masked(&desc->irq_data))
                        mask_irq(desc);
                if (!irqd_irq_disabled(&desc->irq_data))
                        unmask = 1;
        }

        /* caller masked out all except trigger mode flags */
        ret = chip->irq_set_type(&desc->irq_data, flags);

        switch (ret) {
        case IRQ_SET_MASK_OK:
                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
                irqd_set(&desc->irq_data, flags);
                /* fall through */
        case IRQ_SET_MASK_OK_NOCOPY:
                flags = irqd_get_trigger_type(&desc->irq_data);
                irq_settings_set_trigger_mask(desc, flags);
                irqd_clear(&desc->irq_data, IRQD_LEVEL);
                irq_settings_clr_level(desc);
                if (flags & IRQ_TYPE_LEVEL_MASK) {
                        irq_settings_set_level(desc);
                        irqd_set(&desc->irq_data, IRQD_LEVEL);
                }
                ret = 0;
                break;
        default:
                pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
                       flags, irq, chip->irq_set_type);
        }
        if (unmask)
                unmask_irq(desc);
        return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
        return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
        WARN(1, "Primary handler called for nested irq %d\n", irq);
        return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }
                schedule();
        }
        return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
                                 struct irqaction *action, bool force)
{
        if (!(desc->istate & IRQS_ONESHOT))
                return;
again:
        chip_bus_lock(desc);
        raw_spin_lock_irq(&desc->lock);

        /*
         * Implausible though it may be we need to protect us against
         * the following scenario:
         *
         * The thread is faster done than the hard interrupt handler
         * on the other CPU. If we unmask the irq line then the
         * interrupt can come in again and masks the line, leaves due
         * to IRQS_INPROGRESS and the irq line is masked forever.
         *
         * This also serializes the state of shared oneshot handlers
         * versus "desc->threads_oneshot |= action->thread_mask;" in
         * irq_wake_thread(). See the comment there which explains the
         * serialization.
         */
        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
                raw_spin_unlock_irq(&desc->lock);
                chip_bus_sync_unlock(desc);
                cpu_relax();
                goto again;
        }

        /*
         * Now check again, whether the thread should run. Otherwise
         * we would clear the threads_oneshot bit of this thread which
         * was just set.
         */
        if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
                goto out_unlock;

        desc->threads_oneshot &= ~action->thread_mask;

        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data))
                unmask_irq(desc);

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
        chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
        cpumask_var_t mask;

        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
                return;

        /*
         * In case we are out of memory we set IRQTF_AFFINITY again and
         * try again next time
         */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                set_bit(IRQTF_AFFINITY, &action->thread_flags);
                return;
        }

        raw_spin_lock_irq(&desc->lock);
        cpumask_copy(mask, desc->irq_data.affinity);
        raw_spin_unlock_irq(&desc->lock);

        set_cpus_allowed_ptr(current, mask);
        free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static void
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
        local_bh_disable();
        action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
        local_bh_enable();
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
        action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
        static const struct sched_param param = {
                .sched_priority = MAX_USER_RT_PRIO/2,
        };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
        void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
        int wake;

        if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
                                         &action->thread_flags))
                handler_fn = irq_forced_thread_fn;
        else
                handler_fn = irq_thread_fn;

        sched_setscheduler(current, SCHED_FIFO, &param);
        current->irqaction = action;

        while (!irq_wait_for_interrupt(action)) {
                irq_thread_check_affinity(desc, action);

                atomic_inc(&desc->threads_active);

                raw_spin_lock_irq(&desc->lock);
                if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
                        /*
                         * CHECKME: We might need a dedicated
                         * IRQ_THREAD_PENDING flag here, which
                         * retriggers the thread in check_irq_resend()
                         * but AFAICT IRQS_PENDING should be fine as it
                         * retriggers the interrupt itself --- tglx
                         */
                        desc->istate |= IRQS_PENDING;
                        raw_spin_unlock_irq(&desc->lock);
                } else {
                        raw_spin_unlock_irq(&desc->lock);
                        handler_fn(desc, action);
                }

                wake = atomic_dec_and_test(&desc->threads_active);

                if (wake && waitqueue_active(&desc->wait_for_threads))
                        wake_up(&desc->wait_for_threads);
        }

        /* Prevent a stale desc->threads_oneshot */
        irq_finalize_oneshot(desc, action, true);

        /*
         * Clear irqaction. Otherwise exit_irq_thread() would make
         * fuzz about an active irq thread going into nirvana.
         */
        current->irqaction = NULL;
        return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
        struct task_struct *tsk = current;
        struct irq_desc *desc;

        if (!tsk->irqaction)
                return;

        printk(KERN_ERR
               "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
               tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

        desc = irq_to_desc(tsk->irqaction->irq);

        /*
         * Prevent a stale desc->threads_oneshot. Must be called
         * before setting the IRQTF_DIED flag.
         */
        irq_finalize_oneshot(desc, tsk->irqaction, true);

        /*
         * Set the THREAD DIED flag to prevent further wakeups of the
         * soon to be gone threaded handler. IRQTF_* bits live in
         * thread_flags, as tested everywhere else in this file.
         */
        set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
}

static void irq_setup_forced_threading(struct irqaction *new)
{
        if (!force_irqthreads)
                return;
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return;

        new->flags |= IRQF_ONESHOT;

        if (!new->thread_fn) {
                set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
                new->thread_fn = new->handler;
                new->handler = irq_default_primary_handler;
        }
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
        struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
        unsigned long flags, thread_mask = 0;
        int ret, nested, shared = 0;
        cpumask_var_t mask;

        if (!desc)
                return -EINVAL;

        if (desc->irq_data.chip == &no_irq_chip)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & IRQF_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a
                 * problem? Only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
         */
        nested = irq_settings_is_nested_thread(desc);
        if (nested) {
                if (!new->thread_fn)
                        return -EINVAL;
                /*
                 * Replace the primary handler which was provided from
                 * the driver for non nested interrupt handling by the
                 * dummy function which warns when called.
                 */
                new->handler = irq_nested_primary_handler;
        } else {
                irq_setup_forced_threading(new);
        }

        /*
         * Create a handler thread when a thread function is supplied
         * and the interrupt does not nest into another interrupt
         * thread.
         */
        if (new->thread_fn && !nested) {
                struct task_struct *t;

                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
                                   new->name);
                if (IS_ERR(t))
                        return PTR_ERR(t);
                /*
                 * We keep the reference to the task struct even if
                 * the thread dies to avoid that the interrupt code
                 * references an already freed task_struct.
                 */
                get_task_struct(t);
                new->thread = t;
        }

        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto out_thread;
        }

        /*
         * The following block of code has to be executed atomically
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        old_ptr = &desc->action;
        old = *old_ptr;
        if (old) {
                /*
                 * Can't share interrupts unless both agree to and are
                 * the same type (level, edge, polarity). So both flag
                 * fields must have IRQF_SHARED set and the bits which
                 * set the trigger type must match. Also all must
                 * agree on ONESHOT.
                 */
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
                    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
                        old_name = old->name;
                        goto mismatch;
                }

                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQF_PERCPU) !=
                    (new->flags & IRQF_PERCPU))
                        goto mismatch;

                /* add new interrupt at end of irq queue */
                do {
                        thread_mask |= old->thread_mask;
                        old_ptr = &old->next;
                        old = *old_ptr;
                } while (old);
                shared = 1;
        }

        /*
         * Setup the thread mask for this irqaction. Unlikely to have
         * 32 resp. 64 irqs sharing one line, but who knows.
         */
        if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
                ret = -EBUSY;
                goto out_mask;
        }
        new->thread_mask = 1 << ffz(thread_mask);

        if (!shared) {
                init_waitqueue_head(&desc->wait_for_threads);

                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
                        ret = __irq_set_trigger(desc, irq,
                                        new->flags & IRQF_TRIGGER_MASK);

                        if (ret)
                                goto out_mask;
                }

                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
                                  IRQS_ONESHOT | IRQS_WAITING);
                irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

                if (new->flags & IRQF_PERCPU) {
                        irqd_set(&desc->irq_data, IRQD_PER_CPU);
                        irq_settings_set_per_cpu(desc);
                }

                if (new->flags & IRQF_ONESHOT)
                        desc->istate |= IRQS_ONESHOT;

                if (irq_settings_can_autoenable(desc))
                        irq_startup(desc);
                else
                        /* Undo nested disables: */
                        desc->depth = 1;

                /* Exclude IRQ from balancing if requested */
                if (new->flags & IRQF_NOBALANCING) {
                        irq_settings_set_no_balancing(desc);
                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
                }

                /* Set default affinity mask once everything is setup */
                setup_affinity(irq, desc, mask);

        } else if (new->flags & IRQF_TRIGGER_MASK) {
                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
                unsigned int omsk = irq_settings_get_trigger_mask(desc);

                if (nmsk != omsk)
                        /* hope the handler works with current trigger mode */
                        pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
                                   irq, nmsk, omsk);
        }

        new->irq = irq;
        *old_ptr = new;

        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;

        /*
         * Check whether we disabled the irq via the spurious handler
         * before. Reenable it and give it another chance.
         */
        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
                __enable_irq(desc, irq, false);
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        /*
         * Strictly no need to wake it up, but hung_task complains
         * when no hard interrupt wakes the thread up.
         */
        if (new->thread)
                wake_up_process(new->thread);

        register_irq_proc(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);

        return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
        if (!(new->flags & IRQF_PROBE_SHARED)) {
                printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
                if (old_name)
                        printk(KERN_ERR "current handler: %s\n", old_name);
                dump_stack();
        }
#endif
        ret = -EBUSY;

out_mask:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        free_cpumask_var(mask);

out_thread:
        if (new->thread) {
                struct task_struct *t = new->thread;

                new->thread = NULL;
                if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
                        kthread_stop(t);
                put_task_struct(t);
        }
        return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically set up interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
        int retval;
        struct irq_desc *desc = irq_to_desc(irq);

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);

        return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
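
/*
 * Example (hypothetical arch code): installing a timer interrupt with a
 * statically allocated irqaction during early boot, before the slab
 * allocator behind request_irq() is usable. Names are illustrative only.
 *
 *      static struct irqaction foo_timer_irqaction = {
 *              .handler = foo_timer_interrupt,
 *              .flags   = IRQF_TIMER,
 *              .name    = "timer",
 *      };
 *
 *      void __init foo_time_init(void)
 *      {
 *              setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
 *      }
 */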

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action, **action_ptr;
        unsigned long flags;

        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

        if (!desc)
                return NULL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        /*
         * There can be multiple actions per IRQ descriptor, find the right
         * one based on the dev_id:
         */
        action_ptr = &desc->action;
        for (;;) {
                action = *action_ptr;

                if (!action) {
                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
                        raw_spin_unlock_irqrestore(&desc->lock, flags);

                        return NULL;
                }

                if (action->dev_id == dev_id)
                        break;
                action_ptr = &action->next;
        }

        /* Found it - now remove it from the list of entries: */
        *action_ptr = action->next;

        /* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
        if (desc->irq_data.chip->release)
                desc->irq_data.chip->release(irq, dev_id);
#endif

        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action)
                irq_shutdown(desc);

#ifdef CONFIG_SMP
        /* make sure affinity_hint is cleaned up */
        if (WARN_ON_ONCE(desc->affinity_hint))
                desc->affinity_hint = NULL;
#endif

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        unregister_handler_proc(irq, action);

        /* Make sure it's not being used on another CPU: */
        synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
        /*
         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
         * event to happen even now it's being freed, so let's make sure that
         * is so by doing an extra call to the handler ....
         *
         * ( We do this after actually deregistering it, to make sure that a
         *   'real' IRQ doesn't run in parallel with our fake. )
         */
        if (action->flags & IRQF_SHARED) {
                local_irq_save(flags);
                action->handler(irq, dev_id);
                local_irq_restore(flags);
        }
#endif

        if (action->thread) {
                if (!test_bit(IRQTF_DIED, &action->thread_flags))
                        kthread_stop(action->thread);
                put_task_struct(action->thread);
        }

        return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically set up by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
        __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return;

#ifdef CONFIG_SMP
        if (WARN_ON(desc->affinity_notify))
                desc->affinity_notify = NULL;
#endif

        chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
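
/*
 * Example (hypothetical): teardown order for a driver that used the
 * affinity helpers above. Notifier and hint must be cleared first,
 * matching the WARN_ON checks in free_irq() and __free_irq().
 *
 *      irq_set_affinity_notifier(foo->irq, NULL);
 *      irq_set_affinity_hint(foo->irq, NULL);
 *      free_irq(foo->irq, foo);        // same dev_id as request_irq()
 */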

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Primary handler for threaded interrupts.
 *           If NULL and thread_fn != NULL the default
 *           primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *             If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 * IRQF_SHARED          Interrupt is shared
 * IRQF_SAMPLE_RANDOM   The interrupt can be used for entropy
 * IRQF_TRIGGER_*       Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
                         irq_handler_t thread_fn, unsigned long irqflags,
                         const char *devname, void *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
        if ((irqflags & IRQF_SHARED) && !dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;

        if (!irq_settings_can_request(desc))
                return -EINVAL;

        if (!handler) {
                if (!thread_fn)
                        return -EINVAL;
                handler = irq_default_primary_handler;
        }

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->thread_fn = thread_fn;
        action->flags = irqflags;
        action->name = devname;
        action->dev_id = dev_id;

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);

        if (retval)
                kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for it
                 * to happen immediately, so let's make sure....
                 * We disable the irq to make sure that a 'real' IRQ doesn't
                 * run in parallel with our fake.
                 */
                unsigned long flags;

                disable_irq(irq);
                local_irq_save(flags);

                handler(irq, dev_id);

                local_irq_restore(flags);
                enable_irq(irq);
        }
#endif
        return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
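
/*
 * Example (hypothetical slow-bus device): the classic split handler.
 * The primary handler only checks ownership and quiets the device;
 * the slow bus traffic happens in the thread. All "foo" names are
 * assumptions for illustration only.
 *
 *      static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *      {
 *              struct foo_dev *foo = dev_id;
 *
 *              if (!foo_irq_is_ours(foo))
 *                      return IRQ_NONE;        // shared line, not us
 *              foo_mask_device_irq(foo);       // quiet it at the device
 *              return IRQ_WAKE_THREAD;         // defer to foo_thread()
 *      }
 *
 *      static irqreturn_t foo_thread(int irq, void *dev_id)
 *      {
 *              struct foo_dev *foo = dev_id;
 *
 *              foo_read_fifo_over_i2c(foo);    // may sleep here
 *              foo_unmask_device_irq(foo);
 *              return IRQ_HANDLED;
 *      }
 *
 *      ret = request_threaded_irq(foo->irq, foo_quick_check, foo_thread,
 *                                 IRQF_SHARED, "foo", foo);
 */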

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
                            unsigned long flags, const char *name, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret;

        if (!desc)
                return -EINVAL;

        if (irq_settings_is_nested_thread(desc)) {
                ret = request_threaded_irq(irq, NULL, handler,
                                           flags, name, dev_id);
                return !ret ? IRQC_IS_NESTED : ret;
        }

        ret = request_irq(irq, handler, flags, name, dev_id);
        return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
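
/*
 * Example (hypothetical): a driver for a sub-device that may sit behind
 * a slow-bus irq chip (gpio expander, PMIC). It cannot know whether its
 * line is nested, so it lets the core decide; a non-negative return
 * value tells it which context the handler will run in.
 *
 *      ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
 *      if (ret < 0)
 *              return ret;
 *      foo->handler_is_threaded = (ret == IRQC_IS_NESTED);
 */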