/* rculist_bl.h — 4.3 KB */
  1. #ifndef _LINUX_RCULIST_BL_H
  2. #define _LINUX_RCULIST_BL_H
  3. /*
  4. * RCU-protected bl list version. See include/linux/list_bl.h.
  5. */
  6. #include <linux/list_bl.h>
  7. #include <linux/rcupdate.h>
  8. static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
  9. struct hlist_bl_node *n)
  10. {
  11. LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
  12. LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
  13. LIST_BL_LOCKMASK);
  14. rcu_assign_pointer(h->first,
  15. (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
  16. }
  17. static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
  18. {
  19. return (struct hlist_bl_node *)
  20. ((unsigned long)rcu_dereference_check(h->first, hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK);
  21. }
  22. /**
  23. * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
  24. * @n: the element to delete from the hash list.
  25. *
  26. * Note: hlist_bl_unhashed() on the node returns true after this. It is
  27. * useful for RCU based read lockfree traversal if the writer side
  28. * must know if the list entry is still hashed or already unhashed.
  29. *
  30. * In particular, it means that we can not poison the forward pointers
  31. * that may still be used for walking the hash list and we can only
  32. * zero the pprev pointer so list_unhashed() will return true after
  33. * this.
  34. *
  35. * The caller must take whatever precautions are necessary (such as
  36. * holding appropriate locks) to avoid racing with another
  37. * list-mutation primitive, such as hlist_bl_add_head_rcu() or
  38. * hlist_bl_del_rcu(), running on this same list. However, it is
  39. * perfectly legal to run concurrently with the _rcu list-traversal
  40. * primitives, such as hlist_bl_for_each_entry_rcu().
  41. */
  42. static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
  43. {
  44. if (!hlist_bl_unhashed(n)) {
  45. __hlist_bl_del(n);
  46. n->pprev = NULL;
  47. }
  48. }
/**
 * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry().
 */
static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	/*
	 * Only the backward link is poisoned; n->next is left intact
	 * because concurrent readers may still be walking through @n
	 * until a grace period elapses (see comment above).
	 */
	n->pprev = LIST_POISON2;
}
/**
 * hlist_bl_add_head_rcu - adds an element to the head of an hlist_bl
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_bl,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first;

	/* don't need hlist_bl_first_rcu because we're under lock */
	first = hlist_bl_first(h);

	/* Fully link @n into the list before it is published below. */
	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;

	/*
	 * need _rcu because we can have concurrent lock free readers:
	 * hlist_bl_set_first_rcu() uses rcu_assign_pointer(), so the
	 * stores above are ordered before @n becomes visible to them.
	 */
	hlist_bl_set_first_rcu(h, n);
}
/**
 * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_bl_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_bl_node within the struct.
 *
 * The traversal must be protected by rcu_read_lock() (or by holding the
 * list's bit-lock; hlist_bl_first_rcu() accepts either).  The statement
 * expression sets @tpos to the containing object of @pos on every
 * iteration and always yields 1, so the loop condition reduces to
 * "pos != NULL".  next pointers are fetched with rcu_dereference_raw().
 */
#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member) \
for (pos = hlist_bl_first_rcu(head); \
pos && \
({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
pos = rcu_dereference_raw(pos->next))
  118. #endif