/* wait.h */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private	= tsk, \
	.func		= default_wake_function, \
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock), \
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
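/*
 * Example (illustrative sketch only; foo_wq is a hypothetical name, not part
 * of this header): a statically declared wait queue head, and a waiter added
 * to it with DECLARE_WAITQUEUE()/add_wait_queue():
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *	add_wait_queue(&foo_wq, &wait);
 *	...
 *	remove_wait_queue(&foo_wq, &wait);
 */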
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((q), #q, &__key); \
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
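/*
 * Example (sketch; struct foo_dev is hypothetical): a wait queue head embedded
 * in a dynamically allocated object cannot use the static initializer and must
 * be set up at runtime:
 *
 *	struct foo_dev {
 *		wait_queue_head_t wq;
 *		...
 *	};
 *
 *	init_waitqueue_head(&dev->wq);
 */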
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					     wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}
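/*
 * Example (sketch; cond and wq are hypothetical): waitqueue_active() lets a
 * fast path skip the wake-up when nobody is waiting.  The store that makes the
 * condition true must be ordered against the waitqueue check (e.g. with
 * smp_mb()), otherwise a concurrent waiter can miss the wake-up:
 *
 *	cond = true;
 *	smp_mb();
 *	if (waitqueue_active(&wq))
 *		wake_up(&wq);
 */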
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
				       wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
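/*
 * Example (sketch; dev->read_wq is hypothetical): a driver's "data ready" path
 * can pass the poll events it is reporting as the wake-up key, so keyed
 * waiters such as epoll only react to the events they asked for:
 *
 *	wake_up_interruptible_poll(&dev->read_wq, POLLIN | POLLRDNORM);
 */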
/*
 * Evaluates to true when @condition holds or the timeout has expired
 * (ret == 0).  If the condition becomes true exactly as the timeout runs out,
 * @ret is bumped to 1 so callers can still tell success from timeout.
 */
#define ___wait_cond_timeout(condition, ret) \
({ \
	bool __cond = (condition); \
	if (__cond && !ret) \
		ret = 1; \
	__cond || !ret; \
})

#define ___wait_signal_pending(state) \
	((state == TASK_INTERRUPTIBLE && signal_pending(current)) || \
	 (state == TASK_KILLABLE && fatal_signal_pending(current)))

#define ___wait_nop_ret		int ret __always_unused

#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
do { \
	__label__ __out; \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		if (exclusive) \
			prepare_to_wait_exclusive(&wq, &__wait, state); \
		else \
			prepare_to_wait(&wq, &__wait, state); \
		\
		if (condition) \
			break; \
		\
		if (___wait_signal_pending(state)) { \
			ret = -ERESTARTSYS; \
			if (exclusive) { \
				abort_exclusive_wait(&wq, &__wait, \
						     state, NULL); \
				goto __out; \
			} \
			break; \
		} \
		\
		cmd; \
	} \
	finish_wait(&wq, &__wait); \
__out:	; \
} while (0)

#define __wait_event(wq, condition) \
	___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
		      ___wait_nop_ret, schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
	if (condition) \
		break; \
	__wait_event(wq, condition); \
} while (0)
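/*
 * Example (sketch; dev->wq and dev->data_ready are hypothetical): the classic
 * pairing of wait_event() on the consumer side with wake_up() on the producer
 * side:
 *
 *	consumer:
 *		wait_event(dev->wq, dev->data_ready);
 *
 *	producer:
 *		dev->data_ready = 1;
 *		wake_up(&dev->wq);
 */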
#define __wait_event_timeout(wq, condition, ret) \
	___wait_event(wq, ___wait_cond_timeout(condition, ret), \
		      TASK_UNINTERRUPTIBLE, 0, ret, \
		      ret = schedule_timeout(ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_timeout(wq, condition, __ret); \
	__ret; \
})
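/*
 * Example (sketch; dev->done is hypothetical): wait up to one second for
 * completion; a return of 0 means the timeout expired, a positive value is
 * the remaining jiffies:
 *
 *	long left = wait_event_timeout(dev->wq, dev->done, HZ);
 *	if (!left)
 *		handle_timeout();
 */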
#define __wait_event_interruptible(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (signal_pending(current)) { \
			ret = -ERESTARTSYS; \
			break; \
		} \
		schedule(); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible(wq, condition, __ret); \
	__ret; \
})
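/*
 * Example (sketch; dev->data_ready is hypothetical): interruptible waits must
 * check the return value so a pending signal can abort or restart the syscall:
 *
 *	if (wait_event_interruptible(dev->wq, dev->data_ready))
 *		return -ERESTARTSYS;
 */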
#define __wait_event_interruptible_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (___wait_cond_timeout(condition, ret)) \
			break; \
		if (signal_pending(current)) { \
			ret = -ERESTARTSYS; \
			break; \
		} \
		ret = schedule_timeout(ret); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret; \
})
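/*
 * Example (sketch; dev->done is hypothetical): all three outcomes need
 * handling:
 *
 *	long ret = wait_event_interruptible_timeout(dev->wq, dev->done, HZ);
 *	if (ret == 0)
 *		;	// timeout elapsed
 *	else if (ret < 0)
 *		;	// interrupted by a signal (-ERESTARTSYS)
 *	else
 *		;	// condition true, ret is the remaining jiffies
 */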
#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
	int __ret = 0; \
	DEFINE_WAIT(__wait); \
	struct hrtimer_sleeper __t; \
	\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
			      HRTIMER_MODE_REL); \
	hrtimer_init_sleeper(&__t, current); \
	if ((timeout).tv64 != KTIME_MAX) \
		hrtimer_start_range_ns(&__t.timer, timeout, \
				       current->timer_slack_ns, \
				       HRTIMER_MODE_REL); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, state); \
		if (condition) \
			break; \
		if (state == TASK_INTERRUPTIBLE && \
		    signal_pending(current)) { \
			__ret = -ERESTARTSYS; \
			break; \
		} \
		if (!__t.task) { \
			__ret = -ETIME; \
			break; \
		} \
		schedule(); \
	} \
	\
	hrtimer_cancel(&__t.timer); \
	destroy_hrtimer_on_stack(&__t.timer); \
	finish_wait(&wq, &__wait); \
	__ret; \
})
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_UNINTERRUPTIBLE); \
	__ret; \
})
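/*
 * Example (sketch; dev->done is hypothetical): a high-resolution timeout
 * expressed as a ktime_t, here 500 microseconds:
 *
 *	if (wait_event_hrtimeout(dev->wq, dev->done,
 *				 ktime_set(0, 500 * NSEC_PER_USEC)))
 *		;	// -ETIME, the timeout elapsed
 */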
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
	long __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_INTERRUPTIBLE); \
	__ret; \
})

#define __wait_event_interruptible_exclusive(wq, condition, ret) \
do { \
	__label__ __out; \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait_exclusive(&wq, &__wait, \
					  TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (signal_pending(current)) { \
			ret = -ERESTARTSYS; \
			abort_exclusive_wait(&wq, &__wait, \
					     TASK_INTERRUPTIBLE, NULL); \
			goto __out; \
		} \
		schedule(); \
	} \
	finish_wait(&wq, &__wait); \
__out:	; \
} while (0)

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible_exclusive(wq, condition, __ret); \
	__ret; \
})

#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
	int __ret = 0; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		if (likely(list_empty(&__wait.task_list))) \
			__add_wait_queue_tail(&(wq), &__wait); \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (signal_pending(current)) { \
			__ret = -ERESTARTSYS; \
			break; \
		} \
		if (irq) \
			spin_unlock_irq(&(wq).lock); \
		else \
			spin_unlock(&(wq).lock); \
		schedule(); \
		if (irq) \
			spin_lock_irq(&(wq).lock); \
		else \
			spin_lock(&(wq).lock); \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})

/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
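/*
 * Example (sketch; dev->wq and dev->data_ready are hypothetical): the wait
 * queue's own lock protects the condition; the macro drops and retakes
 * wq.lock around schedule():
 *
 *	consumer:
 *		spin_lock_irq(&dev->wq.lock);
 *		ret = wait_event_interruptible_locked_irq(dev->wq,
 *							  dev->data_ready);
 *		if (!ret)
 *			consume_data_locked(dev);
 *		spin_unlock_irq(&dev->wq.lock);
 *
 *	producer (also under dev->wq.lock):
 *		dev->data_ready = 1;
 *		wake_up_locked(&dev->wq);
 */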
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the list and this process
 * is woken up, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the list and this process
 * is woken up, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
#define __wait_event_killable(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
		if (condition) \
			break; \
		if (!fatal_signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_killable(wq, condition, __ret); \
	__ret; \
})
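/*
 * Example (sketch; dev->done is hypothetical): like wait_event_interruptible()
 * but only fatal signals (e.g. SIGKILL) interrupt the sleep, so the task does
 * not show up as unkillable in D state:
 *
 *	if (wait_event_killable(dev->wq, dev->done))
 *		return -ERESTARTSYS;
 */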
#define __wait_event_lock_irq(wq, condition, lock, cmd) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		spin_unlock_irq(&lock); \
		cmd; \
		schedule(); \
		spin_lock_irq(&lock); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, ); \
} while (0)
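/*
 * Example (sketch; dev->lock, dev->wq and dev->queue are hypothetical): the
 * caller holds a driver spinlock with interrupts disabled and the macro drops
 * it around schedule(), retaking it before rechecking the condition:
 *
 *	spin_lock_irq(&dev->lock);
 *	wait_event_lock_irq(dev->wq, !list_empty(&dev->queue), dev->lock);
 *	// dev->queue is non-empty and dev->lock is still held here
 *	spin_unlock_irq(&dev->lock);
 */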
#define __wait_event_interruptible_lock_irq(wq, condition, \
					    lock, ret, cmd) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (signal_pending(current)) { \
			ret = -ERESTARTSYS; \
			break; \
		} \
		spin_unlock_irq(&lock); \
		cmd; \
		schedule(); \
		spin_lock_irq(&lock); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
	int __ret = 0; \
	\
	if (!(condition)) \
		__wait_event_interruptible_lock_irq(wq, condition, \
						    lock, __ret, cmd); \
	__ret; \
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
	int __ret = 0; \
	\
	if (!(condition)) \
		__wait_event_interruptible_lock_irq(wq, condition, \
						    lock, __ret, ); \
	__ret; \
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
						    lock, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (___wait_cond_timeout(condition, ret)) \
			break; \
		if (signal_pending(current)) { \
			ret = -ERESTARTSYS; \
			break; \
		} \
		spin_unlock_irq(&lock); \
		ret = schedule_timeout(ret); \
		spin_lock_irq(&lock); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
						  timeout) \
({ \
	long __ret = timeout; \
	\
	if (!(condition)) \
		__wait_event_interruptible_lock_irq_timeout( \
					wq, condition, lock, __ret); \
	__ret; \
})
/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			  unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
	wait_queue_t name = { \
		.private	= current, \
		.func		= function, \
		.task_list	= LIST_HEAD_INIT((name).task_list), \
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
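/*
 * Example (sketch; dev->wq, dev->data_ready and err are hypothetical): the
 * open-coded wait loop that the wait_event*() macros expand to, useful when
 * the condition check needs custom locking around it:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
 *		if (dev->data_ready)
 *			break;
 *		if (signal_pending(current)) {
 *			err = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&dev->wq, &wait);
 */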
#define DEFINE_WAIT_BIT(name, word, bit) \
	struct wait_bit_queue name = { \
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
		.wait	= { \
			.private	= current, \
			.func		= wake_bit_function, \
			.task_list	= \
				LIST_HEAD_INIT((name).wait.task_list), \
		}, \
	}

#define init_wait(wait) \
	do { \
		(wait)->private = current; \
		(wait)->func = autoremove_wake_function; \
		INIT_LIST_HEAD(&(wait)->task_list); \
		(wait)->flags = 0; \
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
			      int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
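/*
 * Example (sketch; my_bit_wait(), dev->flags and MY_BUSY_BIT are hypothetical):
 * the caller supplies the @action callback that actually sleeps; it returns 0
 * to keep waiting or non-zero to abort the wait:
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	wait_on_bit(&dev->flags, MY_BUSY_BIT, my_bit_wait, TASK_UNINTERRUPTIBLE);
 */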
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				   int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}
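/*
 * Example (sketch; my_atomic_wait() and dev->refs are hypothetical): wait for
 * a reference count to drop to zero, with wake_up_atomic_t() on the release
 * side; as with wait_on_bit(), the caller supplies the sleeping action:
 *
 *	static int my_atomic_wait(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	waiter:
 *		wait_on_atomic_t(&dev->refs, my_atomic_wait,
 *				 TASK_UNINTERRUPTIBLE);
 *
 *	release side:
 *		if (atomic_dec_and_test(&dev->refs))
 *			wake_up_atomic_t(&dev->refs);
 */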
#endif