/* include/linux/wait.h (code-viewer banner and line-number residue removed) */
  1. #ifndef _LINUX_WAIT_H
  2. #define _LINUX_WAIT_H
  3. /*
  4. * Linux wait queue related types and methods
  5. */
  6. #include <linux/list.h>
  7. #include <linux/stddef.h>
  8. #include <linux/spinlock.h>
  9. #include <asm/current.h>
  10. #include <uapi/linux/wait.h>
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* exclusive (wake-one) waiter — see the "wake-one" helpers below */
#define WQ_FLAG_WOKEN		0x02	/* NOTE(review): not referenced in this chunk; presumably set by wakers — confirm */

/*
 * One waiter on a wait queue.  @func is the wakeup callback invoked on
 * wakeup; @private is opaque data for @func — for default_wake_function
 * it is the waiting task (see init_waitqueue_entry()).
 */
struct __wait_queue {
	unsigned int flags;		/* WQ_FLAG_* */
	void *private;			/* callback data; the task_struct for the default callback */
	wait_queue_func_t func;		/* called to wake this entry */
	struct list_head task_list;	/* link in __wait_queue_head::task_list */
};

/* Key identifying which bit (in which word) a bit-waiter waits on. */
struct wait_bit_key {
	void *flags;			/* address of the word containing the bit */
	int bit_nr;			/* bit number, or WAIT_ATOMIC_T_BIT_NR */
#define WAIT_ATOMIC_T_BIT_NR	-1	/* waiting on an atomic_t rather than a bit */
	unsigned long timeout;		/* used by the _timeout bit-wait variants (see out_of_line_wait_on_bit_timeout()) */
};

/* A bit-waiter: a plain wait queue entry plus the bit being waited on. */
struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

/* Head of a wait queue; @lock protects @task_list. */
struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;
  39. /*
  40. * Macros for declaration and initialisaton of the datatypes
  41. */
/*
 * Static initializer for a wait_queue_t bound to task @tsk with the
 * default wake function.  .flags is zero via designated-initializer
 * rules; .task_list is {NULL, NULL} and must be linked in before use.
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a head: unlocked spinlock, empty (self-linked) list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

/* Initializers for the key of a bit waiter / atomic_t waiter. */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * Runtime initialization of a wait queue head.  The static
 * lock_class_key gives lockdep a distinct lock class per call site;
 * #q passes the variable's name for debugging.
 */
#define init_waitqueue_head(q)						\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_waitqueue_head((q), #q, &__key);			\
	} while (0)

#ifdef CONFIG_LOCKDEP
/* Under lockdep, on-stack heads are initialized at runtime so each gets
 * a usable lock class. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
  72. static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
  73. {
  74. q->flags = 0;
  75. q->private = p;
  76. q->func = default_wake_function;
  77. }
  78. static inline void
  79. init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
  80. {
  81. q->flags = 0;
  82. q->private = NULL;
  83. q->func = func;
  84. }
/*
 * waitqueue_active - locklessly test whether any waiter is queued.
 *
 * NOTE(review): this is an unlocked list_empty() check; callers
 * presumably need q->lock or a memory barrier pairing with the waiter
 * to avoid missed wakeups — confirm at each call site.
 */
static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

/* Locked variants: these take q->lock internally (defined out of line). */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
  92. static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
  93. {
  94. list_add(&new->task_list, &head->task_list);
  95. }
  96. /*
  97. * Used for wake-one threads:
  98. */
  99. static inline void
  100. __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  101. {
  102. wait->flags |= WQ_FLAG_EXCLUSIVE;
  103. __add_wait_queue(q, wait);
  104. }
  105. static inline void __add_wait_queue_tail(wait_queue_head_t *head,
  106. wait_queue_t *new)
  107. {
  108. list_add_tail(&new->task_list, &head->task_list);
  109. }
  110. static inline void
  111. __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  112. {
  113. wait->flags |= WQ_FLAG_EXCLUSIVE;
  114. __add_wait_queue_tail(q, wait);
  115. }
  116. static inline void
  117. __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
  118. {
  119. list_del(&old->task_list);
  120. }
/* Action invoked by the bit-wait loops; semantics defined out of line. */
typedef int wait_bit_action_f(struct wait_bit_key *);

/* Core wakeup primitives (implemented out of line). */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);

/* Bit-wait / atomic_t-wait primitives (implemented out of line). */
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

/* Convenience wrappers: nr == 1 wakes a single task, nr == 0 wakes all. */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * The poll mask @m is smuggled through the @key argument.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
/*
 * Evaluate @condition for the timeout variants.  Relies on a long
 * variable named __ret (remaining jiffies) in scope at the expansion
 * site: if the condition became true exactly as the timeout expired
 * (__ret == 0), force __ret to 1 so callers can tell "condition met"
 * from "timed out".  The whole expression is true when waiting should
 * stop: condition met, or timeout exhausted.
 */
#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})
  164. #define ___wait_is_interruptible(state) \
  165. (!__builtin_constant_p(state) || \
  166. state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
  167. /*
  168. * The below macro ___wait_event() has an explicit shadow of the __ret
  169. * variable when used from the wait_event_*() macros.
  170. *
  171. * This is so that both can use the ___wait_cond_timeout() construct
  172. * to wrap the condition.
  173. *
  174. * The type inconsistency of the wait_event_*() __ret variable is also
  175. * on purpose; we use long where we can return timeout values and int
  176. * otherwise.
  177. */
/*
 * ___wait_event - core wait loop shared by every wait_event*() macro.
 * @wq:        waitqueue to sleep on
 * @condition: C expression to wait for (re-evaluated after each wakeup)
 * @state:     task state to sleep in (TASK_UNINTERRUPTIBLE, ...)
 * @exclusive: non-zero to queue as an exclusive (wake-one) waiter
 * @ret:       initial __ret (timeout variants pass the timeout here)
 * @cmd:       statement that actually sleeps, e.g. schedule()
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		/* queue (or requeue) and set the task state */		\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		/* __int != 0: a signal arrived — presumably an		\
		 * -ERESTARTSYS-style code; confirm against		\
		 * prepare_to_wait_event() */				\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})

/* Simplest wrapper: uninterruptible sleep, no timeout, plain schedule(). */
#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())
  214. /**
  215. * wait_event - sleep until a condition gets true
  216. * @wq: the waitqueue to wait on
  217. * @condition: a C expression for the event to wait for
  218. *
  219. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  220. * @condition evaluates to true. The @condition is checked each time
  221. * the waitqueue @wq is woken up.
  222. *
  223. * wake_up() has to be called after changing any variable that could
  224. * change the result of the wait condition.
  225. */
#define wait_event(wq, condition)					\
do {									\
	/* fast path: skip queueing entirely if already true */		\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

/* Timeout work loop: sleep via schedule_timeout(), tracking the
 * remaining jiffies in __ret (read by ___wait_cond_timeout()). */
#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))
  236. /**
  237. * wait_event_timeout - sleep until a condition gets true or a timeout elapses
  238. * @wq: the waitqueue to wait on
  239. * @condition: a C expression for the event to wait for
  240. * @timeout: timeout, in jiffies
  241. *
  242. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  243. * @condition evaluates to true. The @condition is checked each time
  244. * the waitqueue @wq is woken up.
  245. *
  246. * wake_up() has to be called after changing any variable that could
  247. * change the result of the wait condition.
  248. *
  249. * Returns:
  250. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  251. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  252. * or the remaining jiffies (at least 1) if the @condition evaluated
  253. * to %true before the @timeout elapsed.
  254. */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;	/* also read/updated by ___wait_cond_timeout() */ \
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

/* Like __wait_event() but runs @cmd1 before and @cmd2 after the sleep
 * on every loop iteration. */
#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)
  265. /**
  266. * wait_event_cmd - sleep until a condition gets true
  267. * @wq: the waitqueue to wait on
  268. * @condition: a C expression for the event to wait for
  269. * @cmd1: the command will be executed before sleep
  270. * @cmd2: the command will be executed after sleep
  271. *
  272. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  273. * @condition evaluates to true. The @condition is checked each time
  274. * the waitqueue @wq is woken up.
  275. *
  276. * wake_up() has to be called after changing any variable that could
  277. * change the result of the wait condition.
  278. */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	/* fast path: @cmd1/@cmd2 are not run if already true */	\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)

/* Interruptible work loop: returns 0 or a signal error from the core loop. */
#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())
  288. /**
  289. * wait_event_interruptible - sleep until a condition gets true
  290. * @wq: the waitqueue to wait on
  291. * @condition: a C expression for the event to wait for
  292. *
  293. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  294. * @condition evaluates to true or a signal is received.
  295. * The @condition is checked each time the waitqueue @wq is woken up.
  296. *
  297. * wake_up() has to be called after changing any variable that could
  298. * change the result of the wait condition.
  299. *
  300. * The function will return -ERESTARTSYS if it was interrupted by a
  301. * signal and 0 if @condition evaluated to true.
  302. */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	/* fast path: no queueing if already true */			\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})

/* Interruptible timeout work loop; __ret tracks remaining jiffies. */
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))
  314. /**
  315. * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
  316. * @wq: the waitqueue to wait on
  317. * @condition: a C expression for the event to wait for
  318. * @timeout: timeout, in jiffies
  319. *
  320. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  321. * @condition evaluates to true or a signal is received.
  322. * The @condition is checked each time the waitqueue @wq is woken up.
  323. *
  324. * wake_up() has to be called after changing any variable that could
  325. * change the result of the wait condition.
  326. *
  327. * Returns:
  328. * 0 if the @condition evaluated to %false after the @timeout elapsed,
  329. * 1 if the @condition evaluated to %true after the @timeout elapsed,
  330. * the remaining jiffies (at least 1) if the @condition evaluated
  331. * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
  332. * interrupted by a signal.
  333. */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;	/* also read/updated by ___wait_cond_timeout() */ \
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})

/*
 * High-resolution timeout wait: arms an on-stack hrtimer sleeper and
 * loops in @state.  When the timer fires the sleeper's ->task is
 * cleared (hrtimer_init_sleeper), which the loop detects and turns
 * into -ETIME.  A @timeout of KTIME_MAX means "no timeout" and the
 * timer is never armed.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {	/* timer fired */		\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})
  366. /**
  367. * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
  368. * @wq: the waitqueue to wait on
  369. * @condition: a C expression for the event to wait for
  370. * @timeout: timeout, as a ktime_t
  371. *
* The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
* @condition evaluates to true or the timeout elapses.
* The @condition is checked each time the waitqueue @wq is woken up.
  375. *
  376. * wake_up() has to be called after changing any variable that could
  377. * change the result of the wait condition.
  378. *
  379. * The function returns 0 if @condition became true, or -ETIME if the timeout
  380. * elapsed.
  381. */
/*
 * NOTE(review): __ret is int here while the interruptible variant uses
 * long; harmless since only 0 / -ETIME are produced, but inconsistent.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
  390. /**
  391. * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
  392. * @wq: the waitqueue to wait on
  393. * @condition: a C expression for the event to wait for
  394. * @timeout: timeout, as a ktime_t
  395. *
  396. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  397. * @condition evaluates to true or a signal is received.
  398. * The @condition is checked each time the waitqueue @wq is woken up.
  399. *
  400. * wake_up() has to be called after changing any variable that could
  401. * change the result of the wait condition.
  402. *
  403. * The function returns 0 if @condition became true, -ERESTARTSYS if it was
  404. * interrupted by a signal, or -ETIME if the timeout elapsed.
  405. */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

/* Exclusive (wake-one) interruptible wait; exclusive=1 in the core loop. */
#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})
/*
 * Wait with the waitqueue lock held by the caller.  The lock — plain or
 * _irq flavour, selected by @irq — is dropped only around schedule()
 * and re-taken before @condition is re-checked, so the condition is
 * always evaluated under the lock.  @exclusive queues the waiter with
 * WQ_FLAG_EXCLUSIVE.  Evaluates to 0 or -ERESTARTSYS.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		/* requeue if not on the list (a wakeup presumably	\
		 * removed us — see DEFINE_WAIT's callback; confirm) */	\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})
  452. /**
  453. * wait_event_interruptible_locked - sleep until a condition gets true
  454. * @wq: the waitqueue to wait on
  455. * @condition: a C expression for the event to wait for
  456. *
  457. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  458. * @condition evaluates to true or a signal is received.
  459. * The @condition is checked each time the waitqueue @wq is woken up.
  460. *
  461. * It must be called with wq.lock being held. This spinlock is
  462. * unlocked while sleeping but @condition testing is done while lock
  463. * is held and when this macro exits the lock is held.
  464. *
  465. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  466. * functions which must match the way they are locked/unlocked outside
  467. * of this macro.
  468. *
  469. * wake_up_locked() has to be called after changing any variable that could
  470. * change the result of the wait condition.
  471. *
  472. * The function will return -ERESTARTSYS if it was interrupted by a
  473. * signal and 0 if @condition evaluated to true.
  474. */
/* non-exclusive waiter, plain spin_lock()/spin_unlock() flavour */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
  478. /**
  479. * wait_event_interruptible_locked_irq - sleep until a condition gets true
  480. * @wq: the waitqueue to wait on
  481. * @condition: a C expression for the event to wait for
  482. *
  483. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  484. * @condition evaluates to true or a signal is received.
  485. * The @condition is checked each time the waitqueue @wq is woken up.
  486. *
  487. * It must be called with wq.lock being held. This spinlock is
  488. * unlocked while sleeping but @condition testing is done while lock
  489. * is held and when this macro exits the lock is held.
  490. *
  491. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  492. * functions which must match the way they are locked/unlocked outside
  493. * of this macro.
  494. *
  495. * wake_up_locked() has to be called after changing any variable that could
  496. * change the result of the wait condition.
  497. *
  498. * The function will return -ERESTARTSYS if it was interrupted by a
  499. * signal and 0 if @condition evaluated to true.
  500. */
/* non-exclusive waiter, spin_lock_irq()/spin_unlock_irq() flavour */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
  504. /**
  505. * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
  506. * @wq: the waitqueue to wait on
  507. * @condition: a C expression for the event to wait for
  508. *
  509. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  510. * @condition evaluates to true or a signal is received.
  511. * The @condition is checked each time the waitqueue @wq is woken up.
  512. *
  513. * It must be called with wq.lock being held. This spinlock is
  514. * unlocked while sleeping but @condition testing is done while lock
  515. * is held and when this macro exits the lock is held.
  516. *
  517. * The lock is locked/unlocked using spin_lock()/spin_unlock()
  518. * functions which must match the way they are locked/unlocked outside
  519. * of this macro.
  520. *
* The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
* set, so if this process is woken up, other exclusive waiters queued
* on the list are not considered for that wakeup.
  524. *
  525. * wake_up_locked() has to be called after changing any variable that could
  526. * change the result of the wait condition.
  527. *
  528. * The function will return -ERESTARTSYS if it was interrupted by a
  529. * signal and 0 if @condition evaluated to true.
  530. */
/* exclusive (wake-one) waiter, plain spin_lock()/spin_unlock() flavour */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
  534. /**
  535. * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
  536. * @wq: the waitqueue to wait on
  537. * @condition: a C expression for the event to wait for
  538. *
  539. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  540. * @condition evaluates to true or a signal is received.
  541. * The @condition is checked each time the waitqueue @wq is woken up.
  542. *
  543. * It must be called with wq.lock being held. This spinlock is
  544. * unlocked while sleeping but @condition testing is done while lock
  545. * is held and when this macro exits the lock is held.
  546. *
  547. * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
  548. * functions which must match the way they are locked/unlocked outside
  549. * of this macro.
  550. *
* The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
* set, so if this process is woken up, other exclusive waiters queued
* on the list are not considered for that wakeup.
  554. *
  555. * wake_up_locked() has to be called after changing any variable that could
  556. * change the result of the wait condition.
  557. *
  558. * The function will return -ERESTARTSYS if it was interrupted by a
  559. * signal and 0 if @condition evaluated to true.
  560. */
/* exclusive (wake-one) waiter, spin_lock_irq()/spin_unlock_irq() flavour */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))

/* Killable work loop: sleeps in TASK_KILLABLE, woken by fatal signals only. */
#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
  566. /**
  567. * wait_event_killable - sleep until a condition gets true
  568. * @wq: the waitqueue to wait on
  569. * @condition: a C expression for the event to wait for
  570. *
  571. * The process is put to sleep (TASK_KILLABLE) until the
  572. * @condition evaluates to true or a signal is received.
  573. * The @condition is checked each time the waitqueue @wq is woken up.
  574. *
  575. * wake_up() has to be called after changing any variable that could
  576. * change the result of the wait condition.
  577. *
  578. * The function will return -ERESTARTSYS if it was interrupted by a
  579. * signal and 0 if @condition evaluated to true.
  580. */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	/* fast path: no queueing if already true */			\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})

/* Uninterruptible wait that releases @lock (an irq-disabling spinlock)
 * around @cmd and schedule(), re-taking it afterwards. */
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))
  594. /**
  595. * wait_event_lock_irq_cmd - sleep until a condition gets true. The
  596. * condition is checked under the lock. This
  597. * is expected to be called with the lock
  598. * taken.
  599. * @wq: the waitqueue to wait on
  600. * @condition: a C expression for the event to wait for
  601. * @lock: a locked spinlock_t, which will be released before cmd
  602. * and schedule() and reacquired afterwards.
  603. * @cmd: a command which is invoked outside the critical section before
  604. * sleep
  605. *
  606. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  607. * @condition evaluates to true. The @condition is checked each time
  608. * the waitqueue @wq is woken up.
  609. *
  610. * wake_up() has to be called after changing any variable that could
  611. * change the result of the wait condition.
  612. *
  613. * This is supposed to be called while holding the lock. The lock is
  614. * dropped before invoking the cmd and going to sleep and is reacquired
  615. * afterwards.
  616. */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	/* fast path: checked with @lock still held */			\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
  623. /**
  624. * wait_event_lock_irq - sleep until a condition gets true. The
  625. * condition is checked under the lock. This
  626. * is expected to be called with the lock
  627. * taken.
  628. * @wq: the waitqueue to wait on
  629. * @condition: a C expression for the event to wait for
  630. * @lock: a locked spinlock_t, which will be released before schedule()
  631. * and reacquired afterwards.
  632. *
  633. * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
  634. * @condition evaluates to true. The @condition is checked each time
  635. * the waitqueue @wq is woken up.
  636. *
  637. * wake_up() has to be called after changing any variable that could
  638. * change the result of the wait condition.
  639. *
  640. * This is supposed to be called while holding the lock. The lock is
  641. * dropped before going to sleep and is reacquired afterwards.
  642. */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	/* fast path: checked with @lock still held */			\
	if (condition)							\
		break;							\
	/* trailing comma passes an empty @cmd */			\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)

/* Interruptible variant of __wait_event_lock_irq(); returns 0 or a
 * signal error from the core loop. */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))
  655. /**
  656. * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
  657. * The condition is checked under the lock. This is expected to
  658. * be called with the lock taken.
  659. * @wq: the waitqueue to wait on
  660. * @condition: a C expression for the event to wait for
  661. * @lock: a locked spinlock_t, which will be released before cmd and
  662. * schedule() and reacquired afterwards.
  663. * @cmd: a command which is invoked outside the critical section before
  664. * sleep
  665. *
  666. * The process is put to sleep (TASK_INTERRUPTIBLE) until the
  667. * @condition evaluates to true or a signal is received. The @condition is
  668. * checked each time the waitqueue @wq is woken up.
  669. *
  670. * wake_up() has to be called after changing any variable that could
  671. * change the result of the wait condition.
  672. *
  673. * This is supposed to be called while holding the lock. The lock is
  674. * dropped before invoking the cmd and going to sleep and is reacquired
  675. * afterwards.
  676. *
  677. * The macro will return -ERESTARTSYS if it was interrupted by a signal
  678. * and 0 if @condition evaluated to true.
  679. */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
	/* fast path: checked with @lock still held */			\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
/*
 * Note: the trailing comma below deliberately passes an empty @cmd
 * argument to __wait_event_interruptible_lock_irq().
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})
  718. #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
  719. lock, timeout) \
  720. ___wait_event(wq, ___wait_cond_timeout(condition), \
  721. TASK_INTERRUPTIBLE, 0, timeout, \
  722. spin_unlock_irq(&lock); \
  723. __ret = schedule_timeout(__ret); \
  724. spin_lock_irq(&lock));
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 *
 * NOTE(review): ___wait_cond_timeout() appears to force a return value
 * of at least 1 when @condition turns true as the timeout expires --
 * confirm against the ___wait_event() implementation earlier in this
 * file.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	/* Fast path: condition already true (or timeout == 0). */	\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
/* Queue @wait on @q and set the task state; caller re-checks its condition,
 * schedules, and finally calls finish_wait(). */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
/* Restore TASK_RUNNING and unlink @wait from @q if still queued. */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
/* Sleep helper and wake functions used with DEFINE_WAIT_FUNC() entries. */
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/*
 * DEFINE_WAIT_FUNC - declare an on-stack wait_queue_t for the current
 * task, woken through @function.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

/* Common case: entry removes itself from the queue when woken. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * DEFINE_WAIT_BIT - declare an on-stack wait_bit_queue keyed to @bit of
 * @word, woken through wake_bit_function().
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait = {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
/*
 * init_wait - runtime initializer for a wait_queue_t, equivalent to
 * DEFINE_WAIT() for entries that are not declared on the stack.
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
/*
 * Canned wait_bit_action_f implementations for wait_on_bit*():
 * plain schedule(), io_schedule(), and their timeout variants.
 */
extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);
  798. /**
  799. * wait_on_bit - wait for a bit to be cleared
  800. * @word: the word being waited on, a kernel virtual address
  801. * @bit: the bit of the word being waited on
  802. * @mode: the task state to sleep in
  803. *
  804. * There is a standard hashed waitqueue table for generic use. This
  805. * is the part of the hashtable's accessor API that waits on a bit.
  806. * For instance, if one were to have waiters on a bitflag, one would
  807. * call wait_on_bit() in threads waiting for the bit to clear.
  808. * One uses wait_on_bit() where one is waiting for the bit to clear,
  809. * but has no intention of setting it.
  810. * Returned value will be zero if the bit was cleared, or non-zero
  811. * if the process received a signal and the mode permitted wakeup
  812. * on that signal.
  813. */
  814. static inline int
  815. wait_on_bit(void *word, int bit, unsigned mode)
  816. {
  817. if (!test_bit(bit, word))
  818. return 0;
  819. return out_of_line_wait_on_bit(word, bit,
  820. bit_wait,
  821. mode);
  822. }
  823. /**
  824. * wait_on_bit_io - wait for a bit to be cleared
  825. * @word: the word being waited on, a kernel virtual address
  826. * @bit: the bit of the word being waited on
  827. * @mode: the task state to sleep in
  828. *
  829. * Use the standard hashed waitqueue table to wait for a bit
  830. * to be cleared. This is similar to wait_on_bit(), but calls
  831. * io_schedule() instead of schedule() for the actual waiting.
  832. *
  833. * Returned value will be zero if the bit was cleared, or non-zero
  834. * if the process received a signal and the mode permitted wakeup
  835. * on that signal.
  836. */
  837. static inline int
  838. wait_on_bit_io(void *word, int bit, unsigned mode)
  839. {
  840. if (!test_bit(bit, word))
  841. return 0;
  842. return out_of_line_wait_on_bit(word, bit,
  843. bit_wait_io,
  844. mode);
  845. }
  846. /**
  847. * wait_on_bit_action - wait for a bit to be cleared
  848. * @word: the word being waited on, a kernel virtual address
  849. * @bit: the bit of the word being waited on
  850. * @action: the function used to sleep, which may take special actions
  851. * @mode: the task state to sleep in
  852. *
  853. * Use the standard hashed waitqueue table to wait for a bit
  854. * to be cleared, and allow the waiting action to be specified.
  855. * This is like wait_on_bit() but allows fine control of how the waiting
  856. * is done.
  857. *
  858. * Returned value will be zero if the bit was cleared, or non-zero
  859. * if the process received a signal and the mode permitted wakeup
  860. * on that signal.
  861. */
  862. static inline int
  863. wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
  864. {
  865. if (!test_bit(bit, word))
  866. return 0;
  867. return out_of_line_wait_on_bit(word, bit, action, mode);
  868. }
  869. /**
  870. * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
  871. * @word: the word being waited on, a kernel virtual address
  872. * @bit: the bit of the word being waited on
  873. * @mode: the task state to sleep in
  874. *
  875. * There is a standard hashed waitqueue table for generic use. This
  876. * is the part of the hashtable's accessor API that waits on a bit
  877. * when one intends to set it, for instance, trying to lock bitflags.
  878. * For instance, if one were to have waiters trying to set bitflag
  879. * and waiting for it to clear before setting it, one would call
  880. * wait_on_bit() in threads waiting to be able to set the bit.
  881. * One uses wait_on_bit_lock() where one is waiting for the bit to
  882. * clear with the intention of setting it, and when done, clearing it.
  883. *
  884. * Returns zero if the bit was (eventually) found to be clear and was
  885. * set. Returns non-zero if a signal was delivered to the process and
  886. * the @mode allows that signal to wake the process.
  887. */
  888. static inline int
  889. wait_on_bit_lock(void *word, int bit, unsigned mode)
  890. {
  891. if (!test_and_set_bit(bit, word))
  892. return 0;
  893. return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
  894. }
  895. /**
  896. * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
  897. * @word: the word being waited on, a kernel virtual address
  898. * @bit: the bit of the word being waited on
  899. * @mode: the task state to sleep in
  900. *
  901. * Use the standard hashed waitqueue table to wait for a bit
  902. * to be cleared and then to atomically set it. This is similar
  903. * to wait_on_bit(), but calls io_schedule() instead of schedule()
  904. * for the actual waiting.
  905. *
  906. * Returns zero if the bit was (eventually) found to be clear and was
  907. * set. Returns non-zero if a signal was delivered to the process and
  908. * the @mode allows that signal to wake the process.
  909. */
  910. static inline int
  911. wait_on_bit_lock_io(void *word, int bit, unsigned mode)
  912. {
  913. if (!test_and_set_bit(bit, word))
  914. return 0;
  915. return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
  916. }
  917. /**
  918. * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
  919. * @word: the word being waited on, a kernel virtual address
  920. * @bit: the bit of the word being waited on
  921. * @action: the function used to sleep, which may take special actions
  922. * @mode: the task state to sleep in
  923. *
  924. * Use the standard hashed waitqueue table to wait for a bit
  925. * to be cleared and then to set it, and allow the waiting action
  926. * to be specified.
  927. * This is like wait_on_bit() but allows fine control of how the waiting
  928. * is done.
  929. *
  930. * Returns zero if the bit was (eventually) found to be clear and was
  931. * set. Returns non-zero if a signal was delivered to the process and
  932. * the @mode allows that signal to wake the process.
  933. */
  934. static inline int
  935. wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
  936. {
  937. if (!test_and_set_bit(bit, word))
  938. return 0;
  939. return out_of_line_wait_on_bit_lock(word, bit, action, mode);
  940. }
  941. /**
  942. * wait_on_atomic_t - Wait for an atomic_t to become 0
  943. * @val: The atomic value being waited on, a kernel virtual address
  944. * @action: the function used to sleep, which may take special actions
  945. * @mode: the task state to sleep in
  946. *
  947. * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
  948. * the purpose of getting a waitqueue, but we set the key to a bit number
  949. * outside of the target 'word'.
  950. */
  951. static inline
  952. int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
  953. {
  954. if (atomic_read(val) == 0)
  955. return 0;
  956. return out_of_line_wait_on_atomic_t(val, action, mode);
  957. }
  958. #endif /* _LINUX_WAIT_H */