lockref.c

#include <linux/export.h>
#include <linux/lockref.h>
#include <linux/mutex.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		arch_mutex_cpu_relax();						\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
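
/*
 * Illustrative note (not part of the original source): struct lockref
 * packs the spinlock and the reference count into a single 64-bit word,
 * which is what the BUILD_BUG_ON() above enforces.  The loop snapshots
 * that word, bails out to the locked slow path if the snapshot shows the
 * spinlock held, applies CODE to a copy, and publishes the copy with one
 * cmpxchg64.  Roughly, CMPXCHG_LOOP(new.count++;, return;) behaves like:
 *
 *	old = snapshot of lockref->lock_count;
 *	while (spinlock bits in "old" show it unlocked) {
 *		new = prev = old;
 *		new.count++;				// the CODE argument
 *		old = cmpxchg64_relaxed(&lockref->lock_count, prev, new);
 *		if (old == prev)			// nobody raced with us
 *			return;				// the SUCCESS argument
 *		// cmpxchg already reloaded "old"; relax the CPU and retry
 *	}
 *	// fall through to the spin_lock() slow path
 */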
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
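
/*
 * Usage sketch (illustrative only; "struct foo" and hand_off() are
 * hypothetical): lockref_get() is for callers that already own a
 * reference, e.g. when handing a second reference to another subsystem:
 *
 *	struct foo {
 *		struct lockref ref;
 *	};
 *
 *	// we hold a reference to f, so f->ref.count >= 1 here
 *	lockref_get(&f->ref);
 *	hand_off(f);
 */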
/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
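
/*
 * Usage sketch (illustrative; cache_find() is hypothetical): this variant
 * suits lookups that may race with the last reference being dropped:
 *
 *	struct foo *f = cache_find(key);
 *
 *	if (f && !lockref_get_not_zero(&f->ref))
 *		f = NULL;	// count was already 0; object is going away
 */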
/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
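
/*
 * Usage sketch (illustrative; revive_or_reject() is hypothetical): on a
 * 0 return the spinlock is held, so the caller can examine the zero-count
 * object under the lock and must drop the lock itself:
 *
 *	if (!lockref_get_or_lock(&f->ref)) {
 *		// count was 0 and we now hold f->ref.lock
 *		revive_or_reject(f);
 *		spin_unlock(&f->ref.lock);
 *	}
 */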
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
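
/*
 * Usage sketch (illustrative; foo_free() is hypothetical): a typical "put"
 * path.  On a 0 return the count was <= 1, was not decremented, and the
 * lock is held, so the caller decides how to tear the object down:
 *
 *	if (!lockref_put_or_lock(&f->ref)) {
 *		// count was <= 1 and f->ref.lock is now held
 *		spin_unlock(&f->ref.lock);
 *		foo_free(f);
 *	}
 */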
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
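
/*
 * Usage sketch (illustrative; foo_free() is hypothetical): marking an
 * object dead during teardown so that concurrent lockref_get_not_dead()
 * callers back off.  The lock must already be held:
 *
 *	spin_lock(&f->ref.lock);
 *	lockref_mark_dead(&f->ref);	// count becomes -128
 *	spin_unlock(&f->ref.lock);
 *	foo_free(f);
 */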
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if ((int) lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
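
/*
 * Usage sketch (illustrative; lookup_rcu() is hypothetical): the
 * lockless-lookup counterpart to lockref_mark_dead().  A walker that
 * found f without holding any locks only keeps it if the ref was not
 * already marked dead:
 *
 *	rcu_read_lock();
 *	f = lookup_rcu(key);
 *	if (f && !lockref_get_not_dead(&f->ref))
 *		f = NULL;	// ref was dead; teardown is in progress
 *	rcu_read_unlock();
 */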