/* lockref.c */

#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
 * This is useful for architectures with an expensive cpu_relax().
 */
#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax() cpu_relax()
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {                                        \
        struct lockref old;                                                     \
        BUILD_BUG_ON(sizeof(old) != 8);                                         \
        old.lock_count = ACCESS_ONCE(lockref->lock_count);                      \
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old, prev = old;                           \
                CODE                                                            \
                old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,        \
                                                   old.lock_count,              \
                                                   new.lock_count);             \
                if (likely(old.lock_count == prev.lock_count)) {                \
                        SUCCESS;                                                \
                }                                                               \
                arch_mutex_cpu_relax();                                         \
        }                                                                       \
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
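
/*
 * Why a single cmpxchg64 can update both fields at once: in this kernel era
 * <linux/lockref.h> packs the spinlock and the reference count into one
 * 64-bit word, roughly like the sketch below (shown only for illustration;
 * the real header is authoritative):
 *
 *	struct lockref {
 *		union {
 *	#if USE_CMPXCHG_LOCKREF
 *			aligned_u64 lock_count;
 *	#endif
 *			struct {
 *				spinlock_t lock;
 *				unsigned int count;
 *			};
 *		};
 *	};
 *
 * The BUILD_BUG_ON(sizeof(old) != 8) in CMPXCHG_LOOP enforces that packing,
 * and arch_spin_value_unlocked() makes the loop fall back to the spinlock
 * slow path as soon as someone actually holds the lock.
 */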
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
        ,
                return;
        );

        spin_lock(&lockref->lock);
        lockref->count++;
        spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
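
/*
 * Illustrative usage sketch (not part of the original file): "struct foo",
 * its embedded "ref" field and foo_dup() are made-up names. lockref_get()
 * is only safe here because the caller already owns a reference, so the
 * count cannot concurrently drop to zero underneath it.
 *
 *	struct foo {
 *		struct lockref ref;
 *	};
 *
 *	static struct foo *foo_dup(struct foo *f)
 *	{
 *		lockref_get(&f->ref);
 *		return f;
 *	}
 */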
/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
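
/*
 * Illustrative usage sketch (not part of the original file): a lookup path
 * that may race with the final put. foo_tryget() is a made-up helper; it
 * returns the object only if it managed to take a reference, i.e. the count
 * had not already dropped to zero.
 *
 *	static struct foo *foo_tryget(struct foo *f)
 *	{
 *		if (f && lockref_get_not_zero(&f->ref))
 *			return f;
 *		return NULL;
 *	}
 */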
/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (!lockref->count)
                return 0;
        lockref->count++;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
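
/*
 * Illustrative usage sketch (not part of the original file): on a 0 return
 * the count was zero and the caller now holds lockref->lock, so it must
 * decide what to do with the dying object and then drop the lock itself.
 * foo_grab() is a made-up helper that simply gives up in that case.
 *
 *	static bool foo_grab(struct foo *f)
 *	{
 *		if (lockref_get_or_lock(&f->ref))
 *			return true;
 *		spin_unlock(&f->ref.lock);
 *		return false;
 *	}
 */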
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count--;
                if (old.count <= 1)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (lockref->count <= 1)
                return 0;
        lockref->count--;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
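
/*
 * Illustrative usage sketch (not part of the original file): the common
 * "put" pattern. When lockref_put_or_lock() returns 0 the caller holds
 * lockref->lock and the count is still <= 1, so the last-reference work can
 * be done under the lock. foo_put() and foo_destroy() are made-up names.
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (lockref_put_or_lock(&f->ref))
 *			return;
 *		if (--f->ref.count == 0) {
 *			spin_unlock(&f->ref.lock);
 *			foo_destroy(f);
 *			return;
 *		}
 *		spin_unlock(&f->ref.lock);
 *	}
 */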
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
        assert_spin_locked(&lockref->lock);
        lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if ((int)old.count < 0)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if ((int) lockref->count >= 0) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
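
/*
 * Illustrative sketch (not part of the original file) of how the dead state
 * pairs a teardown path with a lockless (e.g. RCU-protected) lookup path:
 * once the lockref is marked dead under the lock, lockref_get_not_dead()
 * can never resurrect the object. foo_kill(), foo_lookup_rcu() and
 * find_foo_locklessly() are made-up names; the dcache uses a pattern like
 * this for dentries.
 *
 *	static void foo_kill(struct foo *f)
 *	{
 *		spin_lock(&f->ref.lock);
 *		lockref_mark_dead(&f->ref);
 *		spin_unlock(&f->ref.lock);
 *		... free f after an RCU grace period ...
 *	}
 *
 *	static struct foo *foo_lookup_rcu(...)
 *	{
 *		struct foo *f;
 *
 *		rcu_read_lock();
 *		f = find_foo_locklessly(...);
 *		if (f && !lockref_get_not_dead(&f->ref))
 *			f = NULL;
 *		rcu_read_unlock();
 *		return f;
 *	}
 */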