
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
typedef irqreturn_t (*irq_handler_t)(int, void *);

struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	cpumask_t mask;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
};
extern irqreturn_t no_action(int cpl, void *dev_id);

extern int __must_check request_irq(unsigned int, irq_handler_t handler,
				    unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
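
/*
 * Usage sketch (hypothetical driver; the foo_* names are illustrative
 * assumptions, not part of this header): register a handler on a shared,
 * falling-edge line and tear it down again. The dev_id cookie passed to
 * request_irq() must match the one later given to free_irq().
 *
 *	static irqreturn_t foo_isr(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	(not ours on a shared line)
 *		foo_ack_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(foo->irq, foo_isr,
 *			  IRQF_SHARED | IRQF_TRIGGER_FALLING, "foo", foo);
 *	...
 *	free_irq(foo->irq, foo);
 */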

struct device;

extern int __must_check devm_request_irq(struct device *dev, unsigned int irq,
					 irq_handler_t handler, unsigned long irqflags,
					 const char *devname, void *dev_id);
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
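
/*
 * Managed variant, a sketch assuming a struct device *dev from a
 * driver's probe() routine: the irq is released automatically when the
 * device is unbound, so devm_free_irq() is only needed for early
 * teardown.
 *
 *	err = devm_request_irq(dev, foo->irq, foo_isr, IRQF_SHARED,
 *			       "foo", foo);
 *	if (err)
 *		return err;
 */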

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif
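
/*
 * Annotation sketch (hypothetical legacy driver): a handler that must
 * re-enable interrupts because its insanely slow hardware is polled for
 * milliseconds. poll_glacial_hardware() is an illustrative assumption.
 *
 *	static irqreturn_t slow_isr(int irq, void *dev_id)
 *	{
 *		local_irq_enable_in_hardirq();
 *		poll_glacial_hardware(dev_id);
 *		return IRQ_HANDLED;
 *	}
 */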

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
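
/*
 * Affinity sketch: steer an irq to CPU 2 where the kernel allows it.
 * Assumes cpumask_of() is available via the cpumask headers included
 * above; on !SMP builds the stubs above make this a no-op.
 *
 *	if (irq_can_set_affinity(foo->irq))
 *		irq_set_affinity(foo->irq, cpumask_of(2));
 */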

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know
 * that a particular irq context is disabled, and which is
 * the only irq-context user of a lock, so that it's safe
 * to take the lock in the irq-disabled section without
 * disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
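
/*
 * Lockdep-variant sketch: take a lock whose only irq-context user is
 * foo's own handler, which the first call has just disabled. foo->lock
 * and foo->irq are illustrative assumptions.
 *
 *	unsigned long flags;
 *
 *	disable_irq_nosync_lockdep_irqsave(foo->irq, &flags);
 *	spin_lock(&foo->lock);
 *	... touch state shared with foo's handler ...
 *	spin_unlock(&foo->lock);
 *	enable_irq_lockdep_irqrestore(foo->irq, &flags);
 */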

/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}
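
/*
 * Wakeup sketch: a driver arms its irq as a system wakeup source on
 * suspend and disarms it on resume. device_may_wakeup() is assumed
 * from the PM core; foo_suspend()/foo_resume() are illustrative.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */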

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator, need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE avoid allocating new softirqs unless you really need
   _really_ high frequency threaded job scheduling. For almost all
   purposes tasklets are more than enough. E.g. all serial device BHs
   et al. should be converted to tasklets, not to softirqs.
 */
enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably, RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
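
/*
 * Softirq sketch (core kernel only; per the warning above, new softirqs
 * should almost never be added): a subsystem registers its action once
 * at init time and raises it from interrupt context. foo_softirq_action()
 * is illustrative and FOO_SOFTIRQ stands in for one of the enum slots.
 *
 *	static void foo_softirq_action(struct softirq_action *a)
 *	{
 *		... drain this cpu's pending work ...
 *	}
 *
 *	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
 *
 *	raise_softirq(FOO_SOFTIRQ);		(from irq context)
 *	raise_softirq_irqoff(FOO_SOFTIRQ);	(if irqs are already off)
 */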

/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them. The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

/* Try to send a softirq to a remote cpu. If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);
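
/*
 * Remote-softirq sketch (heavily hedged; this interface has few users):
 * the work item embeds a call_single_data so it can be queued onto
 * another cpu's softirq_work_list. struct foo_work and target_cpu are
 * illustrative assumptions.
 *
 *	struct foo_work {
 *		struct call_single_data csd;
 *		... payload ...
 *	};
 *
 *	send_remote_softirq(&work->csd, target_cpu, NET_RX_SOFTIRQ);
 */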

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is
     called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not wrt other
     tasklets. If the client needs some inter-tasklet synchronization,
     it must be done with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
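
/*
 * Tasklet sketch (hypothetical driver): the hard-irq handler only acks
 * the device and defers the real work to a tasklet. The foo_* names
 * are illustrative assumptions.
 *
 *	static void foo_tasklet_fn(unsigned long data)
 *	{
 *		struct foo_device *foo = (struct foo_device *)data;
 *
 *		foo_process_completions(foo);
 *	}
 *
 *	tasklet_init(&foo->tasklet, foo_tasklet_fn, (unsigned long)foo);
 *
 *	static irqreturn_t foo_isr(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_ack_irq(foo);
 *		tasklet_schedule(&foo->tasklet);
 *		return IRQ_HANDLED;
 *	}
 *
 *	tasklet_kill(&foo->tasklet);	(at teardown, after free_irq())
 */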

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 *
 * (A code sketch of this sequence follows the declarations below.)
 */
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
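
/*
 * Probing sketch following steps 1-8 of the recipe above (hypothetical
 * legacy ISA device; the foo_* helpers and the 10 ms delay are
 * illustrative assumptions):
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	foo_mask_device_irq(foo);	step 1: quiesce the device
 *	irqs = probe_irq_on();		step 3: take over idle IRQs
 *	foo_trigger_interrupt(foo);	step 4: make the device fire
 *	mdelay(10);			step 5: give it time to interrupt
 *	irq = probe_irq_off(irqs);	step 6: 0=none, negative=multiple
 *	foo_ack_irq(foo);		step 7: clear the pending interrupt
 *	if (irq <= 0)
 *		... retry (step 8) or fall back to a default ...
 */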

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
extern void debug_poll_all_shared_irqs(void);
#else
static inline void debug_poll_all_shared_irqs(void) { }
#endif

int show_interrupts(struct seq_file *p, void *v);

struct irq_desc;

extern int early_irq_init(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int cpu);

#endif /* _LINUX_INTERRUPT_H */