#ifndef _LINUX_RCULIST_BL_H
#define _LINUX_RCULIST_BL_H

/*
 * RCU-protected bl list version. See include/linux/list_bl.h.
 */
#include <linux/list_bl.h>
#include <linux/rcupdate.h>

/* Publish @n as the new first element while preserving the lock bit in @h->first. */
static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
                                          struct hlist_bl_node *n)
{
        LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
        LIST_BL_BUG_ON(!((unsigned long)h->first & LIST_BL_LOCKMASK));
        rcu_assign_pointer(h->first,
                (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
}

/* Fetch the first element for an RCU reader, masking off the lock bit. */
static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
{
        return (struct hlist_bl_node *)
                ((unsigned long)rcu_dereference(h->first) & ~LIST_BL_LOCKMASK);
}
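
/*
 * Illustrative sketch (not part of the original header): bit 0 of h->first
 * doubles as the per-bucket bit spinlock, which is why the helpers above
 * preserve or mask LIST_BL_LOCKMASK.  With a hypothetical bucket 'head':
 *
 *      hlist_bl_lock(head);            // writer: sets the lock bit in head->first
 *      // ... mutate the chain with the _rcu helpers below ...
 *      hlist_bl_unlock(head);
 *
 *      rcu_read_lock();                // reader: takes no lock
 *      pos = hlist_bl_first_rcu(head); // lock bit already masked off
 *      rcu_read_unlock();
 */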

/**
 * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on the node returns true after this. It is
 * useful for RCU-based lock-free read traversal if the writer side
 * must know whether the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we cannot poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so hlist_bl_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_bl_add_head_rcu() or
 * hlist_bl_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_bl_for_each_entry_rcu().
 */
static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
{
        if (!hlist_bl_unhashed(n)) {
                __hlist_bl_del(n);
                n->pprev = NULL;
        }
}
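
/*
 * Illustrative sketch (not part of the original header): writer-side removal
 * with re-initialization.  'struct foo', 'entry' and 'head' are hypothetical,
 * and the bl lock stands in for whatever the caller uses to serialize writers:
 *
 *      struct foo {
 *              struct hlist_bl_node node;
 *              struct rcu_head rcu;
 *      };
 *
 *      hlist_bl_lock(head);
 *      hlist_bl_del_init_rcu(&entry->node);
 *      hlist_bl_unlock(head);
 *      // hlist_bl_unhashed(&entry->node) is now true, but 'entry' may only
 *      // be freed after a grace period, e.g. via kfree_rcu(entry, rcu).
 */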

/**
 * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lock-free traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry().
 */
static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
{
        __hlist_bl_del(n);
        n->pprev = LIST_POISON2;
}
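
/*
 * Illustrative sketch (not part of the original header): remove an entry under
 * the writer-side lock and defer the free until a grace period has elapsed, so
 * concurrent lock-free readers never see freed memory.  Names are hypothetical:
 *
 *      hlist_bl_lock(head);
 *      hlist_bl_del_rcu(&entry->node);
 *      hlist_bl_unlock(head);
 *      kfree_rcu(entry, rcu);          // 'rcu' is a struct rcu_head member of 'entry'
 */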

/**
 * hlist_bl_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_bl,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
                                         struct hlist_bl_head *h)
{
        struct hlist_bl_node *first;

        /* don't need hlist_bl_first_rcu because we're under lock */
        first = hlist_bl_first(h);

        n->next = first;
        if (first)
                first->pprev = &n->next;
        n->pprev = &h->first;

        /* need _rcu because we can have concurrent lock free readers */
        hlist_bl_set_first_rcu(h, n);
}
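
/*
 * Illustrative sketch (not part of the original header): insert a new entry
 * while lock-free readers may be walking the chain.  'struct foo', 'entry',
 * 'head' and 'key' are hypothetical:
 *
 *      struct foo *entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 *
 *      if (entry) {
 *              INIT_HLIST_BL_NODE(&entry->node);
 *              entry->key = key;
 *              hlist_bl_lock(head);
 *              hlist_bl_add_head_rcu(&entry->node, head);
 *              hlist_bl_unlock(head);
 *      }
 */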

/**
 * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_bl_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_bl_node within the struct.
 */
#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member)                \
        for (pos = hlist_bl_first_rcu(head);                                 \
                pos &&                                                        \
                ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
                pos = rcu_dereference_raw(pos->next))
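
/*
 * Illustrative sketch (not part of the original header): lock-free lookup
 * guarded by rcu_read_lock().  'struct foo', 'head' and 'key' are hypothetical:
 *
 *      struct foo *tpos;
 *      struct hlist_bl_node *pos;
 *
 *      rcu_read_lock();
 *      hlist_bl_for_each_entry_rcu(tpos, pos, head, node) {
 *              if (tpos->key == key) {
 *                      // use tpos; it is only guaranteed valid until
 *                      // rcu_read_unlock()
 *                      break;
 *              }
 *      }
 *      rcu_read_unlock();
 */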
#endif