/* include/linux/wait.h */
  1. #ifndef _LINUX_WAIT_H
  2. #define _LINUX_WAIT_H
  3. #include <linux/list.h>
  4. #include <linux/stddef.h>
  5. #include <linux/spinlock.h>
  6. #include <asm/current.h>
  7. #include <uapi/linux/wait.h>
/* A single waiter entry on a wait queue. */
typedef struct __wait_queue wait_queue_t;
/*
 * Per-entry wake callback; returns the value of the underlying
 * try_to_wake_up-style operation.  default_wake_function() is the
 * implementation installed by DECLARE_WAITQUEUE()/init_waitqueue_entry().
 */
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
/* One waiter on a wait queue. */
struct __wait_queue {
	unsigned int flags;		/* WQ_FLAG_* bits */
#define WQ_FLAG_EXCLUSIVE	0x01	/* exclusive waiter: wake at most one */
	void *private;			/* opaque; passed to func (typically the task) */
	wait_queue_func_t func;		/* invoked to wake this entry */
	struct list_head task_list;	/* link on __wait_queue_head::task_list */
};
/* Identifies the (word, bit) pair a bit-waiter is waiting on. */
struct wait_bit_key {
	void *flags;			/* address of the word containing the bit */
	int bit_nr;			/* bit number, or WAIT_ATOMIC_T_BIT_NR */
#define WAIT_ATOMIC_T_BIT_NR	-1	/* sentinel: waiting on an atomic_t, not a bit */
};
/* Wait-queue entry bundled with the bit key it waits for. */
struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};
/* Head of a wait queue: a spinlock protecting the list of waiters. */
struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;
/*
 * Macros for declaration and initialisation of the datatypes
 */

/* task_list starts out { NULL, NULL }; it is linked in when queued. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/* An empty queue head: the list points back at itself. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * init_waitqueue_head - initialise a wait queue head at run time.
 * A static lock_class_key is emitted per call site and handed to
 * __init_waitqueue_head() together with the stringified queue name.
 */
#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
/* Under lockdep, on-stack heads must be initialised at run time. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
  66. static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
  67. {
  68. q->flags = 0;
  69. q->private = p;
  70. q->func = default_wake_function;
  71. }
  72. static inline void init_waitqueue_func_entry(wait_queue_t *q,
  73. wait_queue_func_t func)
  74. {
  75. q->flags = 0;
  76. q->private = NULL;
  77. q->func = func;
  78. }
  79. static inline int waitqueue_active(wait_queue_head_t *q)
  80. {
  81. return !list_empty(&q->task_list);
  82. }
/* Out-of-line add/remove of waiter entries on a queue. */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
  86. static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
  87. {
  88. list_add(&new->task_list, &head->task_list);
  89. }
  90. /*
  91. * Used for wake-one threads:
  92. */
  93. static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
  94. wait_queue_t *wait)
  95. {
  96. wait->flags |= WQ_FLAG_EXCLUSIVE;
  97. __add_wait_queue(q, wait);
  98. }
  99. static inline void __add_wait_queue_tail(wait_queue_head_t *head,
  100. wait_queue_t *new)
  101. {
  102. list_add_tail(&new->task_list, &head->task_list);
  103. }
  104. static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
  105. wait_queue_t *wait)
  106. {
  107. wait->flags |= WQ_FLAG_EXCLUSIVE;
  108. __add_wait_queue_tail(q, wait);
  109. }
  110. static inline void __remove_wait_queue(wait_queue_head_t *head,
  111. wait_queue_t *old)
  112. {
  113. list_del(&old->task_list);
  114. }
/* Core wake-up primitives; @nr is the number of exclusive waiters to wake. */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);

/* Bit-wait and atomic_t-wait helpers, defined out of line. */
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
/*
 * Wake one, @nr, or all (nr == 0) waiters in the given task state.
 * The _locked variants call __wake_up_locked()/__wake_up_locked_key()
 * rather than __wake_up(), i.e. they skip taking the queue lock.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * @m is forwarded as the wake key (e.g. a poll event mask).
 */
#define wake_up_poll(x, m)				\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)			\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)		\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)		\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
/*
 * __wait_event - uninterruptible wait loop: queue ourselves, re-check
 * @condition after every wakeup, and only leave once it holds.
 */
#define __wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
/*
 * __wait_event_timeout - like __wait_event() but bounded by
 * schedule_timeout().  @ret carries the remaining jiffies in and out;
 * it is forced to 1 when @condition turned true exactly as the
 * timeout expired, so callers can distinguish success from timeout.
 */
#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	if (!ret && (condition))					\
		ret = 1;						\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})
/*
 * __wait_event_interruptible - like __wait_event() but in
 * TASK_INTERRUPTIBLE; a pending signal aborts the wait and stores
 * -ERESTARTSYS in @ret.
 */
#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (signal_pending(current)) {				\
			ret = -ERESTARTSYS;				\
			break;						\
		}							\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})
/*
 * __wait_event_interruptible_timeout - interruptible wait bounded by
 * schedule_timeout().  @ret carries remaining jiffies in and out;
 * becomes -ERESTARTSYS on signal, or 1 when @condition turned true
 * exactly as the timeout expired.
 */
#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (signal_pending(current)) {				\
			ret = -ERESTARTSYS;				\
			break;						\
		}							\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	if (!ret && (condition))					\
		ret = 1;						\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})
/*
 * __wait_event_hrtimeout - wait bounded by a high-resolution timer.
 * Evaluates to 0 if @condition became true, -ETIME once the timer has
 * fired (__t.task going NULL signals that), or -ERESTARTSYS when
 * @state is TASK_INTERRUPTIBLE and a signal is pending.  A @timeout
 * of KTIME_MAX means "no timeout": the timer is never started.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, state);			\
		if (condition)						\
			break;						\
		if (state == TASK_INTERRUPTIBLE &&			\
		    signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule();						\
	}								\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	finish_wait(&wq, &__wait);					\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  (Being uninterruptible, this variant
 * does not abort on signals.)
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
/*
 * NOTE(review): __ret is declared long here while wait_event_hrtimeout()
 * uses int (and __wait_event_hrtimeout() itself yields an int) — an
 * inconsistency, though harmless since only 0/-ERESTARTSYS/-ETIME occur.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})
/*
 * __wait_event_interruptible_exclusive - exclusive-waiter variant.
 * On success the wait is finished inside the loop; on a signal it
 * calls abort_exclusive_wait() rather than finish_wait() (so that an
 * exclusive wakeup is not lost — see abort_exclusive_wait()).
 */
#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					  TASK_INTERRUPTIBLE);		\
		if (condition) {					\
			finish_wait(&wq, &__wait);			\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = -ERESTARTSYS;				\
			abort_exclusive_wait(&wq, &__wait,		\
					     TASK_INTERRUPTIBLE, NULL);	\
			break;						\
		}							\
		schedule();						\
	}								\
} while (0)

/*
 * wait_event_interruptible_exclusive - as wait_event_interruptible()
 * but queued as an exclusive (wake-one) waiter.  Returns 0 when
 * @condition became true, -ERESTARTSYS on signal.
 */
#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})
/*
 * __wait_event_interruptible_locked - wait with the caller holding
 * wq.lock.  The lock is dropped around schedule() — with spin_unlock_irq()
 * when @irq, plain spin_unlock() otherwise — and is held again both while
 * testing @condition and on exit.  With @exclusive the entry is queued
 * at the tail with WQ_FLAG_EXCLUSIVE set.  Yields 0 on success,
 * -ERESTARTSYS on signal.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if it is woken a wake-one wakeup stops with it and further
 * waiting processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if it is woken a wake-one wakeup stops with it and further
 * waiting processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
/*
 * __wait_event_killable - like __wait_event_interruptible() but in
 * TASK_KILLABLE: only a fatal signal aborts the wait, storing
 * -ERESTARTSYS in @ret.
 */
#define __wait_event_killable(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
		if (condition)						\
			break;						\
		if (!fatal_signal_pending(current)) {			\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_killable(wq, condition, __ret);		\
	__ret;								\
})
/*
 * __wait_event_lock_irq - uninterruptible wait that drops @lock (with
 * spin_unlock_irq()) around @cmd and schedule(), reacquiring it before
 * @condition is re-tested.  @cmd runs outside the critical section.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
/*
 * __wait_event_interruptible_lock_irq - interruptible variant of
 * __wait_event_lock_irq(): a pending signal aborts the wait with
 * -ERESTARTSYS in @ret; @lock is dropped around @cmd and schedule().
 */
#define __wait_event_interruptible_lock_irq(wq, condition,		\
					    lock, ret, cmd)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (signal_pending(current)) {				\
			ret = -ERESTARTSYS;				\
			break;						\
		}							\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
									\
	if (!(condition))						\
		__wait_event_interruptible_lock_irq(wq, condition,	\
						    lock, __ret, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
									\
	if (!(condition))						\
		__wait_event_interruptible_lock_irq(wq, condition,	\
						    lock, __ret, );	\
	__ret;								\
})
/*
 * __wait_event_interruptible_lock_irq_timeout - interruptible,
 * lock-dropping wait bounded by schedule_timeout().  @ret carries the
 * remaining jiffies in and out; it becomes -ERESTARTSYS on signal and
 * 0 when the timeout expires.  @lock is dropped only around
 * schedule_timeout() and held while @condition is tested.
 */
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (signal_pending(current)) {				\
			ret = -ERESTARTSYS;				\
			break;						\
		}							\
		spin_unlock_irq(&lock);					\
		ret = schedule_timeout(ret);				\
		spin_lock_irq(&lock);					\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
  768. /**
  769. * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
  770. * The condition is checked under the lock. This is expected
  771. * to be called with the lock taken.
  772. * @wq: the waitqueue to wait on
  773. * @condition: a C expression for the event to wait for
  774. * @lock: a locked spinlock_t, which will be released before schedule()
  775. * and reacquired afterwards.
  776. * @timeout: timeout, in jiffies
  777. *
  778. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  779. * @condition evaluates to true or signal is received. The @condition is
  780. * checked each time the waitqueue @wq is woken up.
  781. *
  782. * wake_up() has to be called after changing any variable that could
  783. * change the result of the wait condition.
  784. *
  785. * This is supposed to be called while holding the lock. The lock is
  786. * dropped before going to sleep and is reacquired afterwards.
  787. *
  788. * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
  789. * was interrupted by a signal, and the remaining jiffies otherwise
  790. * if the condition evaluated to true before the timeout elapsed.
  791. */
  792. #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
  793. timeout) \
  794. ({ \
  795. int __ret = timeout; \
  796. \
  797. if (!(condition)) \
  798. __wait_event_interruptible_lock_irq_timeout( \
  799. wq, condition, lock, __ret); \
  800. __ret; \
  801. })
/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy. DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
/* Queue @wait on @q and set the task state to @state. */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
/* As prepare_to_wait(), but marks the waiter exclusive (woken singly). */
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
/* Restore TASK_RUNNING and unlink @wait from @q if still queued. */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
/* Abandon an exclusive wait; passes the wakeup on if one was consumed. */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key);
/* Wake callback that removes the waiter from the queue on success. */
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/* Wake callback for bit waitqueues; filters wakeups by wait_bit_key. */
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/*
 * Declare an on-stack wait_queue_t named @name for the current task,
 * using @function as its wake callback.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

/* Common case: waiter that removes itself from the queue when woken. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * Declare an on-stack wait_bit_queue named @name for the current task,
 * keyed on @bit of @word, using wake_bit_function() so only wakeups
 * matching that bit take effect.
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
/*
 * Runtime (re)initialization of a wait_queue_t for the current task;
 * equivalent to DEFINE_WAIT() for an already-declared entry.
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
  847. /**
  848. * wait_on_bit - wait for a bit to be cleared
  849. * @word: the word being waited on, a kernel virtual address
  850. * @bit: the bit of the word being waited on
  851. * @action: the function used to sleep, which may take special actions
  852. * @mode: the task state to sleep in
  853. *
  854. * There is a standard hashed waitqueue table for generic use. This
  855. * is the part of the hashtable's accessor API that waits on a bit.
  856. * For instance, if one were to have waiters on a bitflag, one would
  857. * call wait_on_bit() in threads waiting for the bit to clear.
  858. * One uses wait_on_bit() where one is waiting for the bit to clear,
  859. * but has no intention of setting it.
  860. */
  861. static inline int wait_on_bit(void *word, int bit,
  862. int (*action)(void *), unsigned mode)
  863. {
  864. if (!test_bit(bit, word))
  865. return 0;
  866. return out_of_line_wait_on_bit(word, bit, action, mode);
  867. }
  868. /**
  869. * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
  870. * @word: the word being waited on, a kernel virtual address
  871. * @bit: the bit of the word being waited on
  872. * @action: the function used to sleep, which may take special actions
  873. * @mode: the task state to sleep in
  874. *
  875. * There is a standard hashed waitqueue table for generic use. This
  876. * is the part of the hashtable's accessor API that waits on a bit
  877. * when one intends to set it, for instance, trying to lock bitflags.
  878. * For instance, if one were to have waiters trying to set bitflag
  879. * and waiting for it to clear before setting it, one would call
  880. * wait_on_bit() in threads waiting to be able to set the bit.
  881. * One uses wait_on_bit_lock() where one is waiting for the bit to
  882. * clear with the intention of setting it, and when done, clearing it.
  883. */
  884. static inline int wait_on_bit_lock(void *word, int bit,
  885. int (*action)(void *), unsigned mode)
  886. {
  887. if (!test_and_set_bit(bit, word))
  888. return 0;
  889. return out_of_line_wait_on_bit_lock(word, bit, action, mode);
  890. }
  891. /**
  892. * wait_on_atomic_t - Wait for an atomic_t to become 0
  893. * @val: The atomic value being waited on, a kernel virtual address
  894. * @action: the function used to sleep, which may take special actions
  895. * @mode: the task state to sleep in
  896. *
  897. * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
  898. * the purpose of getting a waitqueue, but we set the key to a bit number
  899. * outside of the target 'word'.
  900. */
  901. static inline
  902. int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
  903. {
  904. if (atomic_read(val) == 0)
  905. return 0;
  906. return out_of_line_wait_on_atomic_t(val, action, mode);
  907. }
  908. #endif