wait.h 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920
  1. #ifndef _LINUX_WAIT_H
  2. #define _LINUX_WAIT_H
  3. /*
  4. * Linux wait queue related types and methods
  5. */
  6. #include <linux/list.h>
  7. #include <linux/stddef.h>
  8. #include <linux/spinlock.h>
  9. #include <asm/current.h>
  10. #include <uapi/linux/wait.h>
/* One waiter queued on a wait_queue_head_t. */
typedef struct __wait_queue wait_queue_t;
/*
 * Per-entry wakeup callback.  @mode is the set of task states eligible
 * for wakeup; @key is an opaque cookie forwarded from the waker (the
 * wake_up*_poll() macros below pass the poll mask here).
 */
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
struct __wait_queue {
	unsigned int flags;		/* WQ_FLAG_* bits */
#define WQ_FLAG_EXCLUSIVE 0x01		/* wake-one (exclusive) waiter */
	void *private;			/* typically the waiting task_struct */
	wait_queue_func_t func;		/* invoked to wake this entry */
	struct list_head task_list;	/* link on __wait_queue_head.task_list */
};
/* Identifies which bit of which word a bit-waiter is waiting on. */
struct wait_bit_key {
	void *flags;			/* word containing the bit */
	int bit_nr;			/* bit index, or WAIT_ATOMIC_T_BIT_NR */
#define WAIT_ATOMIC_T_BIT_NR -1		/* key refers to an atomic_t, not a bit */
};
/* Wait queue entry specialised for the wait_on_bit() machinery. */
struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};
/* Head of a wait queue: spinlock protecting the list of waiters. */
struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
struct task_struct;
/*
 * Macros for declaration and initialisation of the datatypes
 */
/*
 * Static initializer for a wait_queue_t owned by task @tsk.
 * .task_list is deliberately { NULL, NULL }: the entry is only linked
 * into a queue when actually added via add_wait_queue().
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private = tsk, \
	.func = default_wake_function, \
	.task_list = { NULL, NULL } }
#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
/* Head initializer: unlocked spinlock plus an empty (self-linked) list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
	.task_list = { &(name).task_list, &(name).task_list } }
#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }
#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
/*
 * init_waitqueue_head() is a macro so that every call site gets its own
 * static lock_class_key: lockdep then treats each waitqueue head as a
 * distinct lock class, and #q supplies a human-readable name for it.
 */
#define init_waitqueue_head(q) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((q), #q, &__key); \
	} while (0)
#ifdef CONFIG_LOCKDEP
/*
 * Under lockdep, on-stack heads need run-time initialisation (to get a
 * per-call-site key); otherwise the plain static initializer suffices.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
  69. static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
  70. {
  71. q->flags = 0;
  72. q->private = p;
  73. q->func = default_wake_function;
  74. }
  75. static inline void
  76. init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
  77. {
  78. q->flags = 0;
  79. q->private = NULL;
  80. q->func = func;
  81. }
/* Nonzero if any waiter is queued on @q.
 * NOTE(review): the test is unsynchronized — callers presumably either
 * hold q->lock or tolerate the race; confirm at each call site. */
static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}
/* Locked variants (take q->lock internally); defined out of line. */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
/* Caller must hold head->lock: link @new at the head of the queue. */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}
/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}
/* Caller must hold head->lock: link @new at the tail of the queue. */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}
/* Exclusive waiters go to the tail, so non-exclusive waiters (at the
 * head) are woken first and at most one exclusive waiter wakes. */
static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}
/* Caller must hold head->lock: unlink @old from the queue. */
static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}
/* Core wakeup primitives, implemented out of line (kernel/sched/). */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
/* Bit-wait machinery: sleep until a bit in a word (or an atomic_t)
 * changes; the hashed waitqueue is looked up via bit_waitqueue(). */
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
/* Convenience wrappers: third argument is nr_exclusive — 1 wakes a
 * single exclusive waiter, 0 wakes them all (hence the *_all names). */
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
/*
 * Wakeup macros to be used to report events to the targets.
 * The event mask @m is smuggled through the wake function's @key
 * argument as a void pointer.
 */
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
  152. #define ___wait_cond_timeout(condition) \
  153. ({ \
  154. bool __cond = (condition); \
  155. if (__cond && !__ret) \
  156. __ret = 1; \
  157. __cond || !__ret; \
  158. })
  159. #define ___wait_is_interruptible(state) \
  160. (!__builtin_constant_p(state) || \
  161. state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
/*
 * ___wait_event() - workhorse behind every wait_event*() variant.
 * @wq:        waitqueue head to wait on
 * @condition: expression re-tested each time the task is woken
 * @state:     task state to sleep in (TASK_UNINTERRUPTIBLE, ...)
 * @exclusive: nonzero queues the waiter with WQ_FLAG_EXCLUSIVE
 * @ret:       initial value of the result variable __ret
 * @cmd:       statement(s) that actually sleep (e.g. schedule())
 *
 * prepare_to_wait_event() returns nonzero (an -E value) when a pending
 * signal should interrupt an interruptible/killable sleep; that value
 * becomes __ret.  An interrupted *exclusive* waiter must go through
 * abort_exclusive_wait() instead of finish_wait() (so a wakeup aimed
 * at it is not lost — see its definition elsewhere), hence the jump
 * over finish_wait() to the local __out label.  The whole statement
 * expression evaluates to __ret.
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
({ \
	__label__ __out; \
	wait_queue_t __wait; \
	long __ret = ret; \
	\
	INIT_LIST_HEAD(&__wait.task_list); \
	if (exclusive) \
		__wait.flags = WQ_FLAG_EXCLUSIVE; \
	else \
		__wait.flags = 0; \
	\
	for (;;) { \
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
		\
		if (condition) \
			break; \
		\
		if (___wait_is_interruptible(state) && __int) { \
			__ret = __int; \
			if (exclusive) { \
				abort_exclusive_wait(&wq, &__wait, \
						     state, NULL); \
				goto __out; \
			} \
			break; \
		} \
		\
		cmd; \
	} \
	finish_wait(&wq, &__wait); \
__out:	__ret; \
})
#define __wait_event(wq, condition) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    schedule())
/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
	if (condition) \
		break; \
	__wait_event(wq, condition); \
} while (0)
/* __ret holds the remaining jiffies; schedule_timeout() updates it. */
#define __wait_event_timeout(wq, condition, timeout) \
	___wait_event(wq, ___wait_cond_timeout(condition), \
		      TASK_UNINTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))
/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_timeout(wq, condition, timeout); \
	__ret; \
})
#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    cmd1; schedule(); cmd2)
/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
	if (condition) \
		break; \
	__wait_event_cmd(wq, condition, cmd1, cmd2); \
} while (0)
#define __wait_event_interruptible(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      schedule())
/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible(wq, condition); \
	__ret; \
})
#define __wait_event_interruptible_timeout(wq, condition, timeout) \
	___wait_event(wq, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))
/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_interruptible_timeout(wq, \
						condition, timeout); \
	__ret; \
})
/*
 * Sleep with a high-resolution timeout.  A hrtimer_sleeper is armed
 * unless @timeout is KTIME_MAX ("wait forever"); when the timer fires
 * the loop sees !__t.task and converts that into -ETIME.
 * NOTE(review): that __t.task is cleared by the sleeper's timer
 * callback is inferred from the !__t.task test here — confirm against
 * hrtimer_init_sleeper().
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
	int __ret = 0; \
	struct hrtimer_sleeper __t; \
	\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
			      HRTIMER_MODE_REL); \
	hrtimer_init_sleeper(&__t, current); \
	if ((timeout).tv64 != KTIME_MAX) \
		hrtimer_start_range_ns(&__t.timer, timeout, \
				       current->timer_slack_ns, \
				       HRTIMER_MODE_REL); \
	\
	__ret = ___wait_event(wq, condition, state, 0, 0, \
		if (!__t.task) { \
			__ret = -ETIME; \
			break; \
		} \
		schedule()); \
	\
	hrtimer_cancel(&__t.timer); \
	destroy_hrtimer_on_stack(&__t.timer); \
	__ret; \
})
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_UNINTERRUPTIBLE); \
	__ret; \
})
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
	long __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_INTERRUPTIBLE); \
	__ret; \
})
/* As wait_event_interruptible(), but queued as an exclusive (wake-one)
 * waiter.  Returns 0, or -ERESTARTSYS on signal. */
#define __wait_event_interruptible_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
		      schedule())
#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret; \
})
/*
 * Interruptible wait entered with @wq.lock already held by the caller.
 * The lock is dropped only around schedule(); @condition is always
 * tested with the lock held, and the lock is held again on exit.
 * @exclusive queues the waiter with WQ_FLAG_EXCLUSIVE (at the tail);
 * @irq selects spin_lock_irq()/spin_unlock_irq() vs the plain variants.
 * Result: 0, or -ERESTARTSYS if a signal arrived first.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
	int __ret = 0; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		if (likely(list_empty(&__wait.task_list))) \
			__add_wait_queue_tail(&(wq), &__wait); \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (signal_pending(current)) { \
			__ret = -ERESTARTSYS; \
			break; \
		} \
		if (irq) \
			spin_unlock_irq(&(wq).lock); \
		else \
			spin_unlock(&(wq).lock); \
		schedule(); \
		if (irq) \
			spin_lock_irq(&(wq).lock); \
		else \
			spin_lock(&(wq).lock); \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up, further exclusive waiters on
 * the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up, further exclusive waiters on
 * the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
#define __wait_event_killable(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_killable(wq, condition); \
	__ret; \
})
/* Uninterruptible wait dropping @lock (irq flavour) around @cmd and
 * the sleep; @condition is tested with the lock held. */
#define __wait_event_lock_irq(wq, condition, lock, cmd) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock); \
			    cmd; \
			    schedule(); \
			    spin_lock_irq(&lock))
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, ); \
} while (0)
/* Interruptible variant of __wait_event_lock_irq(); result is 0 or a
 * signal error from ___wait_event(). */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      spin_unlock_irq(&lock); \
		      cmd; \
		      schedule(); \
		      spin_lock_irq(&lock))
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq, \
						condition, lock, cmd); \
	__ret; \
})
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq, \
						condition, lock,); \
	__ret; \
})
  698. #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
  699. lock, timeout) \
  700. ___wait_event(wq, ___wait_cond_timeout(condition), \
  701. TASK_INTERRUPTIBLE, 0, timeout, \
  702. spin_unlock_irq(&lock); \
  703. __ret = schedule_timeout(__ret); \
  704. spin_lock_irq(&lock));
  705. /**
  706. * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
  707. * true or a timeout elapses. The condition is checked under
  708. * the lock. This is expected to be called with the lock taken.
  709. * @wq: the waitqueue to wait on
  710. * @condition: a C expression for the event to wait for
  711. * @lock: a locked spinlock_t, which will be released before schedule()
  712. * and reacquired afterwards.
  713. * @timeout: timeout, in jiffies
  714. *
  715. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  716. * @condition evaluates to true or signal is received. The @condition is
  717. * checked each time the waitqueue @wq is woken up.
  718. *
  719. * wake_up() has to be called after changing any variable that could
  720. * change the result of the wait condition.
  721. *
  722. * This is supposed to be called while holding the lock. The lock is
  723. * dropped before going to sleep and is reacquired afterwards.
  724. *
  725. * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
  726. * was interrupted by a signal, and the remaining jiffies otherwise
  727. * if the condition evaluated to true before the timeout elapsed.
  728. */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	/* ___wait_cond_timeout(): condition check that also accounts */\
	/* for the timeout (defined earlier in this file); skip the   */\
	/* sleep entirely when it is already satisfied.               */\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
  738. /*
  739. * These are the old interfaces to sleep waiting for an event.
  740. * They are racy. DO NOT use them, use the wait_event* interfaces above.
  741. * We plan to remove these interfaces.
  742. */
/* Sleep on @q until woken; no condition is re-checked (hence racy). */
extern void sleep_on(wait_queue_head_t *q);
/* As sleep_on(), but bounded by @timeout (jiffies). */
extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
/* As sleep_on(), but the sleep can be interrupted by a signal. */
extern void interruptible_sleep_on(wait_queue_head_t *q);
/* Interruptible sleep on @q, bounded by @timeout (jiffies). */
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
  747. /*
  748. * Waitqueues which are removed from the waitqueue_head at wakeup time
  749. */
/* Queue @wait on @q and set the task's @state; paired with finish_wait(). */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
/* As prepare_to_wait(), but mark the waiter exclusive. */
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
/* prepare_to_wait() variant used by the wait_event*() macros above. */
/* NOTE(review): presumably returns -ERESTARTSYS on a pending signal in */
/* interruptible states -- confirm against kernel/sched/wait.c. */
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
/* Restore the task state and remove @wait from @q if still queued. */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
/* Abort an exclusive wait; @mode/@key as for the wakeup functions. */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
/* Wakeup callback that also removes the waiter from the queue. */
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/* Wakeup callback for bit-waitqueues (see DEFINE_WAIT_BIT below). */
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/*
 * Declare an on-stack wait_queue_t for the current task, with @function
 * as its wakeup callback.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

/* Common case: a waiter that removes itself from the queue at wakeup. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * Declare an on-stack wait_bit_queue for the current task, keyed on
 * @bit of @word, with wake_bit_function() as its wakeup callback.
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
/*
 * Run-time initialization of a pre-existing wait_queue_t for the current
 * task (same fields as DEFINE_WAIT() sets up, plus cleared flags).
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
  781. /**
  782. * wait_on_bit - wait for a bit to be cleared
  783. * @word: the word being waited on, a kernel virtual address
  784. * @bit: the bit of the word being waited on
  785. * @action: the function used to sleep, which may take special actions
  786. * @mode: the task state to sleep in
  787. *
  788. * There is a standard hashed waitqueue table for generic use. This
  789. * is the part of the hashtable's accessor API that waits on a bit.
  790. * For instance, if one were to have waiters on a bitflag, one would
  791. * call wait_on_bit() in threads waiting for the bit to clear.
  792. * One uses wait_on_bit() where one is waiting for the bit to clear,
  793. * but has no intention of setting it.
  794. */
  795. static inline int
  796. wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
  797. {
  798. if (!test_bit(bit, word))
  799. return 0;
  800. return out_of_line_wait_on_bit(word, bit, action, mode);
  801. }
  802. /**
  803. * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
  804. * @word: the word being waited on, a kernel virtual address
  805. * @bit: the bit of the word being waited on
  806. * @action: the function used to sleep, which may take special actions
  807. * @mode: the task state to sleep in
  808. *
  809. * There is a standard hashed waitqueue table for generic use. This
  810. * is the part of the hashtable's accessor API that waits on a bit
  811. * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
  815. * One uses wait_on_bit_lock() where one is waiting for the bit to
  816. * clear with the intention of setting it, and when done, clearing it.
  817. */
  818. static inline int
  819. wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
  820. {
  821. if (!test_and_set_bit(bit, word))
  822. return 0;
  823. return out_of_line_wait_on_bit_lock(word, bit, action, mode);
  824. }
  825. /**
  826. * wait_on_atomic_t - Wait for an atomic_t to become 0
  827. * @val: The atomic value being waited on, a kernel virtual address
  828. * @action: the function used to sleep, which may take special actions
  829. * @mode: the task state to sleep in
  830. *
  831. * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
  832. * the purpose of getting a waitqueue, but we set the key to a bit number
  833. * outside of the target 'word'.
  834. */
  835. static inline
  836. int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
  837. {
  838. if (atomic_read(val) == 0)
  839. return 0;
  840. return out_of_line_wait_on_atomic_t(val, action, mode);
  841. }
  842. #endif /* _LINUX_WAIT_H */