/* lockref.c */

#include <linux/export.h>
#include <linux/lockref.h>

#ifdef CONFIG_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg(&lockref->lock_count,			\
					 old.lock_count, new.lock_count);	\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax();							\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
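
/*
 * Explanatory note (not in the original source): struct lockref packs the
 * spinlock and the reference count into a single 64-bit word, lock_count,
 * which is what the BUILD_BUG_ON() above enforces.  CMPXCHG_LOOP snapshots
 * that word, and as long as the snapshot shows the spinlock unlocked it
 * applies CODE to a private copy and tries to install the whole word with
 * one cmpxchg().  If another CPU changed either half in the meantime, the
 * cmpxchg() fails and hands back the fresh value, and the loop retries.
 * Once somebody is seen holding the spinlock, the loop exits and the caller
 * falls back to taking the lock.  Without CONFIG_CMPXCHG_LOCKREF the macro
 * is a no-op, so every operation below goes straight to the spinlock path.
 */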

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
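
/*
 * Illustrative sketch, not part of the original file: a hypothetical helper
 * showing when plain lockref_get() is appropriate, namely when the caller
 * already owns a reference and is merely handing out another one.
 */
static inline void example_ref_dup(struct lockref *ref)
{
	/* The caller's own reference pins the count above zero. */
	lockref_get(ref);
}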

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
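
/*
 * Illustrative sketch, not part of the original file: lockref_get_not_zero()
 * suits the case where the object was found through a lookup structure and
 * may already be on its way out.  "example_tryget" is a hypothetical name.
 */
static inline int example_tryget(struct lockref *ref)
{
	/* Returns 0 if the count had already dropped to zero. */
	return lockref_get_not_zero(ref);
}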

/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
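
/*
 * Illustrative sketch, not part of the original file: the failure case of
 * lockref_get_or_lock() leaves the caller holding lockref->lock, so it can
 * examine or revive the zero-count object under the lock instead of simply
 * giving up.  The revalidation step below is a hypothetical placeholder.
 */
static inline int example_get_or_revalidate(struct lockref *ref)
{
	if (lockref_get_or_lock(ref))
		return 1;	/* took a reference without touching the lock */

	/* Count was zero and ref->lock is now held by us. */
	/* ... caller-specific revalidation would go here ... */
	spin_unlock(&ref->lock);
	return 0;
}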

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
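
/*
 * Illustrative sketch, not part of the original file: a hypothetical put
 * path.  lockref_put_or_lock() drops the count locklessly in the common
 * case; when the count would reach zero it takes the lock instead, so the
 * final teardown always runs with lockref->lock held.
 */
static inline void example_put(struct lockref *ref)
{
	if (lockref_put_or_lock(ref))
		return;		/* other references remain */

	/* Count was <= 1 and we hold ref->lock: this was the last user. */
	lockref_mark_dead(ref);
	spin_unlock(&ref->lock);
	/* ... free the containing object here ... */
}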

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
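
/*
 * Explanatory note (not in the original source): lockref_mark_dead() must
 * be called with lockref->lock held, as the assert above checks.  Setting
 * the count to -128 makes every subsequent lockref_get_not_dead() fail, so
 * code using the _not_dead variant cannot take new references on an object
 * that is being torn down.
 */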

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if ((int) lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
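
/*
 * Illustrative sketch, not part of the original file: lockref_get_not_dead()
 * is intended for lockless lookups that can race with teardown.  A
 * hypothetical lookup path would try it and treat failure as "the object is
 * dead, retry the lookup".
 */
static inline int example_lookup_get(struct lockref *ref)
{
	if (lockref_get_not_dead(ref))
		return 1;	/* got a reference; the object is still live */

	/* Marked dead by a concurrent teardown (see the example_put() sketch). */
	return 0;
}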