/*
 * swait.c - simple (single-waiter) wait queue support.
 */
#include <linux/sched.h>
#include <linux/swait.h>
  3. void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
  4. struct lock_class_key *key)
  5. {
  6. raw_spin_lock_init(&q->lock);
  7. lockdep_set_class_and_name(&q->lock, key, name);
  8. INIT_LIST_HEAD(&q->task_list);
  9. }
  10. EXPORT_SYMBOL(__init_swait_queue_head);
  11. /*
  12. * The thing about the wake_up_state() return value; I think we can ignore it.
  13. *
  14. * If for some reason it would return 0, that means the previously waiting
  15. * task is already running, so it will observe condition true (or has already).
  16. */
  17. void swake_up_locked(struct swait_queue_head *q)
  18. {
  19. struct swait_queue *curr;
  20. if (list_empty(&q->task_list))
  21. return;
  22. curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
  23. wake_up_process(curr->task);
  24. list_del_init(&curr->task_list);
  25. }
  26. EXPORT_SYMBOL(swake_up_locked);
  27. void swake_up(struct swait_queue_head *q)
  28. {
  29. unsigned long flags;
  30. if (!swait_active(q))
  31. return;
  32. raw_spin_lock_irqsave(&q->lock, flags);
  33. swake_up_locked(q);
  34. raw_spin_unlock_irqrestore(&q->lock, flags);
  35. }
  36. EXPORT_SYMBOL(swake_up);
  37. /*
  38. * Does not allow usage from IRQ disabled, since we must be able to
  39. * release IRQs to guarantee bounded hold time.
  40. */
  41. void swake_up_all(struct swait_queue_head *q)
  42. {
  43. struct swait_queue *curr;
  44. LIST_HEAD(tmp);
  45. if (!swait_active(q))
  46. return;
  47. raw_spin_lock_irq(&q->lock);
  48. list_splice_init(&q->task_list, &tmp);
  49. while (!list_empty(&tmp)) {
  50. curr = list_first_entry(&tmp, typeof(*curr), task_list);
  51. wake_up_state(curr->task, TASK_NORMAL);
  52. list_del_init(&curr->task_list);
  53. if (list_empty(&tmp))
  54. break;
  55. raw_spin_unlock_irq(&q->lock);
  56. raw_spin_lock_irq(&q->lock);
  57. }
  58. raw_spin_unlock_irq(&q->lock);
  59. }
  60. EXPORT_SYMBOL(swake_up_all);
  61. void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
  62. {
  63. wait->task = current;
  64. if (list_empty(&wait->task_list))
  65. list_add(&wait->task_list, &q->task_list);
  66. }
  67. void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
  68. {
  69. unsigned long flags;
  70. raw_spin_lock_irqsave(&q->lock, flags);
  71. __prepare_to_swait(q, wait);
  72. set_current_state(state);
  73. raw_spin_unlock_irqrestore(&q->lock, flags);
  74. }
  75. EXPORT_SYMBOL(prepare_to_swait);
  76. long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
  77. {
  78. if (signal_pending_state(state, current))
  79. return -ERESTARTSYS;
  80. prepare_to_swait(q, wait, state);
  81. return 0;
  82. }
  83. EXPORT_SYMBOL(prepare_to_swait_event);
  84. void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
  85. {
  86. __set_current_state(TASK_RUNNING);
  87. if (!list_empty(&wait->task_list))
  88. list_del_init(&wait->task_list);
  89. }
  90. void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
  91. {
  92. unsigned long flags;
  93. __set_current_state(TASK_RUNNING);
  94. if (!list_empty_careful(&wait->task_list)) {
  95. raw_spin_lock_irqsave(&q->lock, flags);
  96. list_del_init(&wait->task_list);
  97. raw_spin_unlock_irqrestore(&q->lock, flags);
  98. }
  99. }
  100. EXPORT_SYMBOL(finish_swait);