rculist_nulls.h

#ifndef _LINUX_RCULIST_NULLS_H
#define _LINUX_RCULIST_NULLS_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list_nulls.h>
#include <linux/rcupdate.h>

/**
 * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_nulls_unhashed() on the node returns true after this. It is
 * useful for RCU-based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so hlist_nulls_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_nulls_add_head_rcu() or
 * hlist_nulls_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_nulls_for_each_entry_rcu().
 */
static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
	if (!hlist_nulls_unhashed(n)) {
		__hlist_nulls_del(n);
		n->pprev = NULL;
	}
}
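/*
 * Usage sketch (illustrative, not part of the upstream header): a writer
 * unhashing an object while keeping the node queryable via
 * hlist_nulls_unhashed(). "obj", its "obj_node" member and "table_lock"
 * are assumed names for this sketch only.
 *
 *	spin_lock(&table_lock);
 *	hlist_nulls_del_init_rcu(&obj->obj_node);
 *	spin_unlock(&table_lock);
 *
 *	// later, on a writer-side path:
 *	if (hlist_nulls_unhashed(&obj->obj_node))
 *		;	// the entry is no longer on any hash chain
 */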
#define hlist_nulls_first_rcu(head) \
	(*((struct hlist_nulls_node __rcu __force **)&(head)->first))

#define hlist_nulls_next_rcu(node) \
	(*((struct hlist_nulls_node __rcu __force **)&(node)->next))

/**
 * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_nulls_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
 * or hlist_nulls_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_nulls_for_each_entry_rcu().
 */
static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
	__hlist_nulls_del(n);
	n->pprev = LIST_POISON2;
}
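/*
 * Usage sketch (illustrative, not part of the upstream header): writer-side
 * removal. The object must remain valid for concurrent readers, so it is
 * either freed after a grace period or allocated from a
 * SLAB_TYPESAFE_BY_RCU cache whose readers revalidate the entry.
 * "obj", "obj_node", "obj_rcu" and "table_lock" are assumed names for
 * this sketch only.
 *
 *	spin_lock(&table_lock);
 *	hlist_nulls_del_rcu(&obj->obj_node);
 *	spin_unlock(&table_lock);
 *	kfree_rcu(obj, obj_rcu);	// deferred free; obj_rcu is a struct rcu_head member
 */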
/**
 * hlist_nulls_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_nulls,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
 * or hlist_nulls_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
					    struct hlist_nulls_head *h)
{
	struct hlist_nulls_node *first = h->first;

	n->next = first;
	n->pprev = &h->first;
	rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
	if (!is_a_nulls(first))
		first->pprev = &n->next;
}
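/*
 * Usage sketch (illustrative, not part of the upstream header): writer-side
 * insertion into one bucket of a nulls hash table. "struct object",
 * "object_hashfn()", "hashtable[]" and "table_lock" are assumed names for
 * this sketch only; each bucket head is initialized with
 * INIT_HLIST_NULLS_HEAD(&hashtable[slot], slot) so its nulls value encodes
 * the slot number.
 *
 *	struct object {
 *		struct hlist_nulls_node obj_node;
 *		atomic_t		refcnt;
 *		int			key;
 *	};
 *
 *	void insert_object(struct object *obj)
 *	{
 *		unsigned int slot = object_hashfn(obj->key);
 *
 *		spin_lock(&table_lock);
 *		hlist_nulls_add_head_rcu(&obj->obj_node, &hashtable[slot]);
 *		spin_unlock(&table_lock);
 *	}
 */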
/**
 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_nulls_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_nulls_node within the struct.
 *
 * The barrier() is needed to make sure the compiler doesn't cache the
 * first element [1], as this loop can be restarted [2].
 * [1] Documentation/atomic_ops.txt around line 114
 * [2] Documentation/RCU/rculist_nulls.txt around line 146
 */
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)		\
	for (({barrier();}),							\
	     pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));		\
	     (!is_a_nulls(pos)) &&						\
		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
	     pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
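/*
 * Usage sketch (illustrative, not part of the upstream header): reader-side
 * lookup under rcu_read_lock(). If the traversal ends on a nulls marker
 * that does not match the slot it started from, an object was moved to
 * another chain mid-walk (e.g. SLAB_TYPESAFE_BY_RCU reuse) and the lookup
 * restarts, in the style described in Documentation/RCU/rculist_nulls.txt.
 * Names other than the list/RCU API are assumed for this sketch only.
 *
 *	struct object *lookup_object(int key)
 *	{
 *		unsigned int slot = object_hashfn(key);
 *		struct hlist_nulls_node *node;
 *		struct object *obj;
 *
 *		rcu_read_lock();
 *	begin:
 *		hlist_nulls_for_each_entry_rcu(obj, node, &hashtable[slot], obj_node) {
 *			if (obj->key == key) {
 *				if (!atomic_inc_not_zero(&obj->refcnt))
 *					goto begin;	// object being freed, retry
 *				if (obj->key != key) {	// reused for another key?
 *					put_object(obj);
 *					goto begin;
 *				}
 *				rcu_read_unlock();
 *				return obj;
 *			}
 *		}
 *		if (get_nulls_value(node) != slot)
 *			goto begin;	// chain changed under us, restart
 *		rcu_read_unlock();
 *		return NULL;
 *	}
 */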
#endif /* __KERNEL__ */
#endif /* _LINUX_RCULIST_NULLS_H */