#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE       0x01
#define WQ_FLAG_WOKEN           0x02

struct __wait_queue {
        unsigned int            flags;
        void                    *private;
        wait_queue_func_t       func;
        struct list_head        task_list;
};

struct wait_bit_key {
        void                    *flags;
        int                     bit_nr;
#define WAIT_ATOMIC_T_BIT_NR    -1
        unsigned long           timeout;
};

struct wait_bit_queue {
        struct wait_bit_key     key;
        wait_queue_t            wait;
};

struct __wait_queue_head {
        spinlock_t              lock;
        struct list_head        task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;
/*
 * Macros for declaration and initialisation of the datatypes
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) {                            \
        .private        = tsk,                                          \
        .func           = default_wake_function,                        \
        .task_list      = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)                                    \
        wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                           \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock),              \
        .task_list      = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)                           \
        { .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)                              \
        { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)                          \
        do {                                            \
                static struct lock_class_key __key;     \
                                                        \
                __init_waitqueue_head((q), #q, &__key); \
        } while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
        q->flags        = 0;
        q->private      = p;
        q->func         = default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
        q->flags        = 0;
        q->private      = NULL;
        q->func         = func;
}
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @q: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head_t::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq))         if (@cond)
 *        wake_up(wq);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(wait_queue_head_t *q)
{
        return !list_empty(&q->task_list);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq: wait queue head
 *
 * Returns true if wq has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(wait_queue_head_t *wq)
{
        /*
         * We need to be sure we are in sync with the
         * add_wait_queue modifications to the wait queue.
         *
         * This memory barrier should be paired with one on the
         * waiting side.
         */
        smp_mb();
        return waitqueue_active(wq);
}
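
/*
 * Usage sketch (illustrative, not part of this header): a waker that skips
 * the wakeup when no one is waiting, following the rules in the
 * waitqueue_active() comment above.  The names my_wq and my_cond are
 * hypothetical.
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *      static bool my_cond;
 *
 *      static void my_waker(void)
 *      {
 *              my_cond = true;
 *              smp_mb();       (pairs with set_current_state() in the waiter)
 *              if (waitqueue_active(&my_wq))
 *                      wake_up(&my_wq);
 *      }
 */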
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
        list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         wait_queue_t *new)
{
        list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
        list_del(&old->task_list);
}
typedef int wait_bit_action_f(struct wait_bit_key *, int mode);

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)                                              \
        __wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)                                       \
        __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)                                \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)                           \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition)                                 \
({                                                                      \
        bool __cond = (condition);                                      \
        if (__cond && !__ret)                                           \
                __ret = 1;                                              \
        __cond || !__ret;                                               \
})
#define ___wait_is_interruptible(state)                                 \
        (!__builtin_constant_p(state) ||                                \
                state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

extern void init_wait_entry(wait_queue_t *__wait, int flags);
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd)        \
({                                                                      \
        __label__ __out;                                                \
        wait_queue_t __wait;                                            \
        long __ret = ret;       /* explicit shadow */                   \
                                                                        \
        init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0);    \
        for (;;) {                                                      \
                long __int = prepare_to_wait_event(&wq, &__wait, state);\
                                                                        \
                if (condition)                                          \
                        break;                                          \
                                                                        \
                if (___wait_is_interruptible(state) && __int) {         \
                        __ret = __int;                                  \
                        goto __out;                                     \
                }                                                       \
                                                                        \
                cmd;                                                    \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
__out:  __ret;                                                          \
})
#define __wait_event(wq, condition)                                     \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)                                       \
do {                                                                    \
        might_sleep();                                                  \
        if (condition)                                                  \
                break;                                                  \
        __wait_event(wq, condition);                                    \
} while (0)
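
/*
 * Usage sketch (illustrative, not part of this header): a minimal
 * producer/consumer pair built on wait_event()/wake_up().  The names
 * my_wq and my_data_ready are hypothetical.
 *
 *      static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *      static int my_data_ready;
 *
 *      consumer:
 *              wait_event(my_wq, my_data_ready);
 *
 *      producer:
 *              my_data_ready = 1;
 *              wake_up(&my_wq);
 *
 * Note the ordering: the producer must update my_data_ready before calling
 * wake_up(), as the comment above wait_event() requires.
 */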
#define __io_wait_event(wq, condition)                                  \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq, condition)                                    \
do {                                                                    \
        might_sleep();                                                  \
        if (condition)                                                  \
                break;                                                  \
        __io_wait_event(wq, condition);                                 \
} while (0)
#define __wait_event_freezable(wq, condition)                           \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
                      schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq, condition)                             \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_freezable(wq, condition);          \
        __ret;                                                          \
})
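
/*
 * Usage sketch (assumption: a kernel thread that must not block system
 * suspend; my_thread_fn, my_wq and my_work_pending are hypothetical):
 *
 *      static int my_thread_fn(void *data)
 *      {
 *              while (!kthread_should_stop()) {
 *                      if (wait_event_freezable(my_wq, my_work_pending ||
 *                                               kthread_should_stop()))
 *                              continue;       (-ERESTARTSYS: woken by a signal)
 *                      ... process the pending work ...
 *              }
 *              return 0;
 *      }
 */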
#define __wait_event_timeout(wq, condition, timeout)                    \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_UNINTERRUPTIBLE, 0, timeout,                 \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)                      \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_timeout(wq, condition, timeout);   \
        __ret;                                                          \
})
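
/*
 * Usage sketch showing how the three-way return value above is typically
 * consumed (my_wq and my_done are hypothetical):
 *
 *      long ret = wait_event_timeout(my_wq, my_done, msecs_to_jiffies(500));
 *      if (!ret)
 *              handle the timeout: my_done was still false
 *      else
 *              my_done became true; ret is 1 or the jiffies remaining
 */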
#define __wait_event_freezable_timeout(wq, condition, timeout)          \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_INTERRUPTIBLE, 0, timeout,                   \
                      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq, condition, timeout)            \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
        __ret;                                                          \
})
#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)           \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,  \
                            cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)             \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);          \
} while (0)

#define __wait_event_cmd(wq, condition, cmd1, cmd2)                     \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)                       \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_cmd(wq, condition, cmd1, cmd2);                    \
} while (0)
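
/*
 * Usage sketch (assumption: a caller that must drop a lock only for the
 * actual sleep; my_wq, my_cond and my_mutex are hypothetical):
 *
 *      wait_event_cmd(my_wq, my_cond,
 *                     mutex_unlock(&my_mutex),         before schedule()
 *                     mutex_lock(&my_mutex));          after waking up
 *
 * Because cmd2 runs before the loop re-checks @condition, my_cond is
 * always tested while my_mutex is held.
 */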
#define __wait_event_interruptible(wq, condition)                       \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
                      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)                         \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible(wq, condition);      \
        __ret;                                                          \
})
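
/*
 * Usage sketch (typical pattern in a read or ioctl path; my_wq and
 * my_cond are hypothetical):
 *
 *      ret = wait_event_interruptible(my_wq, my_cond);
 *      if (ret)
 *              return ret;     (-ERESTARTSYS: let signal delivery restart
 *                               the syscall where possible)
 */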
#define __wait_event_interruptible_timeout(wq, condition, timeout)      \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_INTERRUPTIBLE, 0, timeout,                   \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)        \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_interruptible_timeout(wq,          \
                                                condition, timeout);    \
        __ret;                                                          \
})
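
/*
 * Usage sketch covering all three outcomes (my_wq and my_done are
 * hypothetical):
 *
 *      long ret = wait_event_interruptible_timeout(my_wq, my_done, HZ);
 *      if (ret < 0)
 *              return ret;             interrupted by a signal
 *      if (ret == 0)
 *              return -ETIMEDOUT;      still false after one second
 *      condition became true; ret jiffies were left
 */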
#define __wait_event_hrtimeout(wq, condition, timeout, state)           \
({                                                                      \
        int __ret = 0;                                                  \
        struct hrtimer_sleeper __t;                                     \
                                                                        \
        hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,              \
                              HRTIMER_MODE_REL);                        \
        hrtimer_init_sleeper(&__t, current);                            \
        if ((timeout).tv64 != KTIME_MAX)                                \
                hrtimer_start_range_ns(&__t.timer, timeout,             \
                                       current->timer_slack_ns,         \
                                       HRTIMER_MODE_REL);               \
                                                                        \
        __ret = ___wait_event(wq, condition, state, 0, 0,               \
                if (!__t.task) {                                        \
                        __ret = -ETIME;                                 \
                        break;                                          \
                }                                                       \
                schedule());                                            \
                                                                        \
        hrtimer_cancel(&__t.timer);                                     \
        destroy_hrtimer_on_stack(&__t.timer);                           \
        __ret;                                                          \
})
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the @timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)                    \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
                                               TASK_UNINTERRUPTIBLE);   \
        __ret;                                                          \
})
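
/*
 * Usage sketch (my_wq and my_cond are hypothetical); the timeout is a
 * ktime_t, so helpers such as ms_to_ktime() are convenient:
 *
 *      int ret = wait_event_hrtimeout(my_wq, my_cond, ms_to_ktime(2));
 *      if (ret == -ETIME)
 *              the 2ms high-resolution timeout elapsed first
 */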
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)      \
({                                                                      \
        long __ret = 0;                                                 \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
                                               TASK_INTERRUPTIBLE);     \
        __ret;                                                          \
})
#define __wait_event_interruptible_exclusive(wq, condition)             \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
                      schedule())

#define wait_event_interruptible_exclusive(wq, condition)               \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible_exclusive(wq, condition);\
        __ret;                                                          \
})

#define __wait_event_killable_exclusive(wq, condition)                  \
        ___wait_event(wq, condition, TASK_KILLABLE, 1, 0,               \
                      schedule())

#define wait_event_killable_exclusive(wq, condition)                    \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_killable_exclusive(wq, condition); \
        __ret;                                                          \
})

#define __wait_event_freezable_exclusive(wq, condition)                 \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
                      schedule(); try_to_freeze())

#define wait_event_freezable_exclusive(wq, condition)                   \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_freezable_exclusive(wq, condition);\
        __ret;                                                          \
})
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({                                                                      \
        int __ret = 0;                                                  \
        DEFINE_WAIT(__wait);                                            \
        if (exclusive)                                                  \
                __wait.flags |= WQ_FLAG_EXCLUSIVE;                      \
        do {                                                            \
                if (likely(list_empty(&__wait.task_list)))              \
                        __add_wait_queue_tail(&(wq), &__wait);          \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (signal_pending(current)) {                          \
                        __ret = -ERESTARTSYS;                           \
                        break;                                          \
                }                                                       \
                if (irq)                                                \
                        spin_unlock_irq(&(wq).lock);                    \
                else                                                    \
                        spin_unlock(&(wq).lock);                        \
                schedule();                                             \
                if (irq)                                                \
                        spin_lock_irq(&(wq).lock);                      \
                else                                                    \
                        spin_lock(&(wq).lock);                          \
        } while (!(condition));                                         \
        __remove_wait_queue(&(wq), &__wait);                            \
        __set_current_state(TASK_RUNNING);                              \
        __ret;                                                          \
})
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)                  \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
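
/*
 * Usage sketch (assumption: a driver that protects its own state with
 * wq.lock; my_wq and my_cond are hypothetical):
 *
 *      spin_lock(&my_wq.lock);
 *      ret = wait_event_interruptible_locked(my_wq, my_cond);
 *      ... my_cond may be consumed here, still under the lock ...
 *      spin_unlock(&my_wq.lock);
 *
 * The matching waker updates the state and calls wake_up_locked()
 * while holding the same my_wq.lock.
 */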
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)              \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so a wake-one wakeup stops at this process; further waiting
 * processes on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)        \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so a wake-one wakeup stops at this process; further waiting
 * processes on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)    \
        ((condition)                                                    \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
#define __wait_event_killable(wq, condition)                            \
        ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)                              \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_killable(wq, condition);           \
        __ret;                                                          \
})
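
/*
 * Usage sketch (assumption: a path that must not abort on ordinary
 * signals but should not hang a killed task; my_wq and my_cond are
 * hypothetical):
 *
 *      ret = wait_event_killable(my_wq, my_cond);
 *      if (ret)
 *              return ret;     only a fatal signal (e.g. SIGKILL) gets here,
 *                              since TASK_KILLABLE ignores the rest
 */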
#define __wait_event_lock_irq(wq, condition, lock, cmd)                 \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            spin_unlock_irq(&lock);                     \
                            cmd;                                        \
                            schedule();                                 \
                            spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)               \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_lock_irq(wq, condition, lock, cmd);                \
} while (0)
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)                        \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_lock_irq(wq, condition, lock, );                   \
} while (0)
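
/*
 * Usage sketch (assumption: state protected by an irq-disabling
 * spinlock; my_lock, my_wq and my_cond are hypothetical):
 *
 *      spin_lock_irq(&my_lock);
 *      wait_event_lock_irq(my_wq, my_cond, my_lock);
 *      ... my_cond is true and my_lock is held again here ...
 *      spin_unlock_irq(&my_lock);
 */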
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)   \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
                      spin_unlock_irq(&lock);                           \
                      cmd;                                              \
                      schedule();                                       \
                      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible_lock_irq(wq,         \
                                                condition, lock, cmd);  \
        __ret;                                                          \
})
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)          \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible_lock_irq(wq,         \
                                                condition, lock,);      \
        __ret;                                                          \
})
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,      \
                                                    lock, timeout)      \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_INTERRUPTIBLE, 0, timeout,                   \
                      spin_unlock_irq(&lock);                           \
                      __ret = schedule_timeout(__ret);                  \
                      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *              true or a timeout elapses. The condition is checked under
 *              the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies if the
 * @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,  \
                                                  timeout)              \
({                                                                      \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_interruptible_lock_irq_timeout(    \
                                        wq, condition, lock, timeout);  \
        __ret;                                                          \
})
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)                                \
        wait_queue_t name = {                                           \
                .private        = current,                              \
                .func           = function,                             \
                .task_list      = LIST_HEAD_INIT((name).task_list),     \
        }

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
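
/*
 * Usage sketch: the open-coded equivalent of wait_event(), useful when
 * the loop needs extra work between wakeups (my_wq and my_cond are
 * hypothetical):
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *              if (my_cond)
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&my_wq, &wait);
 *
 * autoremove_wake_function() (set by DEFINE_WAIT) removes the entry from
 * the queue on wakeup, so prepare_to_wait() re-adds it each iteration.
 */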
#define DEFINE_WAIT_BIT(name, word, bit)                                \
        struct wait_bit_queue name = {                                  \
                .key = __WAIT_BIT_KEY_INITIALIZER(word, bit),           \
                .wait   = {                                             \
                        .private        = current,                      \
                        .func           = wake_bit_function,            \
                        .task_list      =                               \
                                LIST_HEAD_INIT((name).wait.task_list),  \
                },                                                      \
        }

#define init_wait(wait)                                                 \
do {                                                                    \
        (wait)->private = current;                                      \
        (wait)->func = autoremove_wake_function;                        \
        INIT_LIST_HEAD(&(wait)->task_list);                             \
        (wait)->flags = 0;                                              \
} while (0)

extern int bit_wait(struct wait_bit_key *, int);
extern int bit_wait_io(struct wait_bit_key *, int);
extern int bit_wait_timeout(struct wait_bit_key *, int);
extern int bit_wait_io_timeout(struct wait_bit_key *, int);
/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * Use wait_on_bit() in threads that wait for the bit to clear but
 * have no intention of setting it.
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait,
                                       mode);
}
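
/*
 * Usage sketch (my_flags and MY_BIT_BUSY are hypothetical):
 *
 *      waiter:
 *              if (wait_on_bit(&my_flags, MY_BIT_BUSY, TASK_KILLABLE))
 *                      return -EINTR;  woken by a fatal signal
 *
 *      owner, when done:
 *              clear_bit(MY_BIT_BUSY, &my_flags);
 *              smp_mb__after_atomic();
 *              wake_up_bit(&my_flags, MY_BIT_BUSY);
 *
 * The barrier orders the clear against the lockless waitqueue check
 * inside wake_up_bit(), mirroring the waitqueue_active() rules above.
 */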
/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), but calls
 * io_schedule() instead of schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait_io,
                                       mode);
}

/**
 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 * @timeout: timeout, in jiffies
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), except it also takes
 * a timeout parameter.
 *
 * Returned value will be zero if the bit was cleared before the
 * @timeout elapsed, or non-zero if the @timeout elapsed or process
 * received a signal and the mode permitted wakeup on that signal.
 */
static inline int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
                    unsigned long timeout)
{
        might_sleep();
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_timeout(word, bit,
                                               bit_wait_timeout,
                                               mode, timeout);
}
/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared, and allow the waiting action to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
                   unsigned mode)
{
        might_sleep();
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit, action, mode);
}
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * Use wait_on_bit_lock() in threads that wait for the bit to clear
 * with the intention of setting it, and clearing it again when done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}
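
/*
 * Usage sketch: a simple bit lock (my_flags and MY_BIT_LOCK are
 * hypothetical):
 *
 *      lock:
 *              wait_on_bit_lock(&my_flags, MY_BIT_LOCK, TASK_UNINTERRUPTIBLE);
 *      unlock:
 *              clear_bit_unlock(MY_BIT_LOCK, &my_flags);
 *              smp_mb__after_atomic();
 *              wake_up_bit(&my_flags, MY_BIT_LOCK);
 */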
/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to atomically set it. This is similar
 * to wait_on_bit(), but calls io_schedule() instead of schedule()
 * for the actual waiting.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to set it, and allow the waiting action
 * to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
                        unsigned mode)
{
        might_sleep();
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
        might_sleep();
        if (atomic_read(val) == 0)
                return 0;
        return out_of_line_wait_on_atomic_t(val, action, mode);
}
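
/*
 * Usage sketch: waiting for outstanding users to drain (my_count is a
 * hypothetical atomic_t reference count, and my_atomic_wait is a
 * hypothetical caller-supplied @action; a minimal one just calls
 * schedule() and returns 0):
 *
 *      releasing a reference:
 *              if (atomic_dec_and_test(&my_count))
 *                      wake_up_atomic_t(&my_count);
 *
 *      waiting for the count to reach zero:
 *              wait_on_atomic_t(&my_count, my_atomic_wait,
 *                               TASK_UNINTERRUPTIBLE);
 */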
#endif /* _LINUX_WAIT_H */