/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool inprogress;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
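
/*
 * Example (hypothetical driver code, for illustration only): a typical
 * teardown path first stops the device from raising the interrupt, then
 * waits for any handler instance still running on another CPU before
 * freeing resources the handler might touch:
 *
 *	static void foo_shutdown(struct foo_dev *foo)
 *	{
 *		writel(0, foo->regs + FOO_IRQ_ENABLE);	// hypothetical register
 *		synchronize_irq(foo->irq);
 *		// no handler instance runs anymore; safe to tear down
 *		kfree(foo->dma_buf);
 *	}
 */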

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(data->affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}
	return ret;
}

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, false);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @mask: cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
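
/*
 * Example (illustrative, not part of this file): pin an interrupt to
 * CPU 2, assuming the irq number is valid and the line supports affinity:
 *
 *	int ret = irq_set_affinity(irq, cpumask_of(2));
 *	if (ret)
 *		pr_warn("could not pin irq %u: %d\n", irq, ret);
 *
 * On lines that cannot be moved from process context the new mask is only
 * recorded as pending here and applied later from interrupt context.
 */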

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
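
/*
 * Example (hypothetical, for illustration): a multiqueue driver can expose
 * its preferred CPU per queue via /proc/irq/<N>/affinity_hint. The mask
 * must stay valid for as long as the hint is set, and the hint must be
 * cleared before the irq is freed:
 *
 *	irq_set_affinity_hint(q->irq, cpumask_of(q->cpu));	// setup
 *	...
 *	irq_set_affinity_hint(q->irq, NULL);	// before free_irq()
 */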

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *	    notification. Function pointers must be initialised;
 *	    the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
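
/*
 * Example (hypothetical driver code): receive a callback whenever user
 * space or the kernel retargets the interrupt, e.g. to rebalance per-CPU
 * resources. Both function pointers must be set before registering:
 *
 *	static void foo_affinity_changed(struct irq_affinity_notify *n,
 *					 const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(n, struct foo_dev, notify);
 *
 *		foo_rebalance(foo, mask);	// hypothetical helper
 *	}
 *
 *	static void foo_notify_release(struct kref *ref)
 *	{
 *		// last reference dropped; nothing extra to free here
 *	}
 *
 *	foo->notify.notify = foo_affinity_changed;
 *	foo->notify.release = foo_notify_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->notify);
 *	...
 *	irq_set_affinity_notifier(foo->irq, NULL);	// before free_irq()
 */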

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq, false);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
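
/*
 * Example (illustrative): disable/enable calls nest, so a driver can
 * bracket a reconfiguration that must not race with its handler:
 *
 *	disable_irq(foo->irq);		// also waits for running handlers
 *	foo_reprogram_hw(foo);		// hypothetical helper
 *	enable_irq(foo->irq);
 *
 * Two nested disable_irq() calls require two enable_irq() calls before
 * the line is unmasked again.
 */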

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->istate & IRQS_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
			/* Pretend that it got disabled ! */
			desc->depth++;
		}
		desc->istate &= ~IRQS_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock
 * are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq, false);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/*
	 * Wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
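
/*
 * Example (illustrative): a driver's suspend callback arms its interrupt
 * as a wakeup source when the device may wake the system:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);	// wraps irq_set_irq_wake(irq, 1)
 *		return 0;
 *	}
 *
 * The matching resume callback must call disable_irq_wake() so the
 * enable/disable counts stay balanced.
 */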

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (desc->action)
			if (irqflags & desc->action->flags & IRQF_SHARED)
				canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}
		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and mask the line, then leave
	 * due to IRQS_INPROGRESS, and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active) &&
	    waitqueue_active(&desc->wait_for_threads))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct task_work *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);

	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct task_work on_exit_work;
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	sched_setscheduler(current, SCHED_FIFO, &param);

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (!noirqdebug)
			note_interrupt(action->irq, desc, action_ret);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;
	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is loaded without actually installing a new
		 * handler, but that is not really a problem - only the
		 * sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 or 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is re-enabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("irq %d uses trigger mode %u; requested %u\n",
				   irq, omsk, nmsk);
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action)
		irq_shutdown(desc);

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now that it's being freed, so let's make sure
	 * that is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
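
/*
 * Example (illustrative): the dev_id passed to free_irq() must match the
 * one given at request time, which is how the right action is picked on
 * a shared line:
 *
 *	free_irq(foo->irq, foo);	// foo was the dev_id at request time
 *
 * For a shared IRQ, quiesce the device first so it cannot raise the line
 * while the handler is being removed.
 */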

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
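
/*
 * Example (hypothetical driver code): split handling between a quick
 * hard-irq check and a sleepable thread, as described above. All foo_*
 * names are made up for illustration:
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_is_ours(foo))	// check a status register
 *			return IRQ_NONE;	// shared line: not our device
 *		foo_mask_device_irq(foo);	// quiet the device
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn()
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep, e.g. i2c access
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */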

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
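
/*
 * Example (illustrative): callers must treat any non-negative return as
 * success, since it may be IRQC_IS_HARDIRQ or IRQC_IS_NESTED:
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	// ret says whether foo_handler runs in hard-irq or thread context
 */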

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, irq, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}

/**
 * request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources, but doesn't
 * automatically enable the interrupt. It has to be done on each
 * CPU using enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
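
/*
 * Example (hypothetical, for illustration): a per-cpu timer driver
 * allocates a per-cpu cookie, requests the irq once, and then enables it
 * on each CPU as it comes up:
 *
 *	foo->percpu = alloc_percpu(struct foo_percpu);
 *	ret = request_percpu_irq(foo->irq, foo_percpu_handler,
 *				 "foo-timer", foo->percpu);
 *
 *	// later, on each CPU (e.g. from a CPU-up notifier):
 *	enable_percpu_irq(foo->irq, IRQ_TYPE_NONE);
 *	...
 *	disable_percpu_irq(foo->irq);	// on each CPU before freeing
 *	free_percpu_irq(foo->irq, foo->percpu);
 */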