lockref.c

#include <linux/export.h>
#include <linux/lockref.h>

#ifdef CONFIG_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax();							\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
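
/*
 * For orientation, a rough pseudocode sketch (not compilable code) of
 * what CMPXCHG_LOOP(new.count++;, return;) does in the
 * CONFIG_CMPXCHG_LOCKREF case:
 *
 *	old = *lockref;				// snapshot lock + count
 *	while (lock half of "old" reads unlocked) {
 *		new = old; new.count++;		// CODE runs on a copy
 *		if (cmpxchg64 swapped old for new)
 *			return;			// SUCCESS
 *		cpu_relax();			// cmpxchg reloaded "old"
 *	}
 *	// otherwise fall through to the spinlock slow path
 */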

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
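
/*
 * Hypothetical usage sketch ("obj", "obj->ref" and hand_off() are
 * illustrative names, not kernel API): lockref_get() is only safe
 * while the caller already owns a reference, e.g. when handing a
 * second reference to someone else:
 *
 *	lockref_get(&obj->ref);		// count was >= 1, now one more
 *	hand_off(obj);			// callee owns the new reference
 */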

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
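
/*
 * Hypothetical usage sketch (the RCU-protected lookup and the names
 * are illustrative assumptions): take a reference only if the final
 * put has not already happened, as in a lockless cache lookup:
 *
 *	rcu_read_lock();
 *	obj = lookup(key);
 *	if (obj && !lockref_get_not_zero(&obj->ref))
 *		obj = NULL;		// lost the race with the last put
 *	rcu_read_unlock();
 */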

/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
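
/*
 * Note the contract on the 0 return: the function comes back with
 * lockref->lock HELD, so a caller (illustrative sketch below) can
 * decide the object's fate under the lock:
 *
 *	if (!lockref_get_or_lock(&obj->ref)) {
 *		// count was zero; obj->ref.lock is held here
 *		resurrect(obj);			// hypothetical helper
 *		spin_unlock(&obj->ref.lock);
 *	}
 */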

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
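
/*
 * Hypothetical usage sketch, modelled on a dput()-style put path: a
 * 1 return means one of several references was dropped locklessly; a
 * 0 return means the lock is held and count <= 1, i.e. this may be
 * the last reference (names below are illustrative):
 *
 *	if (lockref_put_or_lock(&obj->ref))
 *		return;			// fast path: count was > 1
 *	// slow path: obj->ref.lock is held and count <= 1
 *	dead = (--obj->ref.count == 0);
 *	spin_unlock(&obj->ref.lock);
 *	if (dead)
 *		free_obj(obj);		// hypothetical teardown
 */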

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
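
/*
 * The assert documents the contract: callers mark a lockref dead only
 * while holding the lock. A typical teardown (illustrative sketch):
 *
 *	spin_lock(&obj->ref.lock);
 *	lockref_mark_dead(&obj->ref);	// count becomes -128
 *	spin_unlock(&obj->ref.lock);
 */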

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if ((int)lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
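
/*
 * Hypothetical usage sketch, pairing with lockref_mark_dead() above:
 * lockless lookups can refuse objects already being torn down, since
 * a dead lockref has a negative count:
 *
 *	if (lockref_get_not_dead(&obj->ref))
 *		use(obj);		// got a reference, object is live
 *	else
 *		bail();			// dead: teardown in progress
 */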