/*
 * lockref.c - reference counts embedded in a spinlock-protected word,
 * with a lock-free cmpxchg fast path when CONFIG_CMPXCHG_LOCKREF is set.
 */
#include <linux/export.h>
#include <linux/lockref.h>

#ifdef CONFIG_CMPXCHG_LOCKREF

/*
 * CMPXCHG_LOOP - lock-free fast path for lockref operations.
 *
 * The spinlock and the reference count must share one 64-bit word
 * (enforced by the BUILD_BUG_ON below) so that both can be updated
 * atomically with a single cmpxchg() on ->lock_count.
 *
 * The loop runs only while the embedded spinlock is observed unlocked.
 * CODE updates the candidate value in "new" (and may "return" or "break"
 * out to abort before anything is committed); SUCCESS runs exactly once
 * after the cmpxchg() has committed the update.
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg(&lockref->lock_count,			\
					 old.lock_count, new.lock_count);	\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
	}									\
} while (0)

#else

/*
 * No cmpxchg support: the fast path is a no-op, so every caller falls
 * straight through to its spinlock-protected slow path.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: bump the count with a single cmpxchg and return. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: the embedded lock was held (or no cmpxchg support). */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	/*
	 * Fast path: increment speculatively, but bail out (before the
	 * cmpxchg ever runs, so nothing is committed) if the count we
	 * loaded was zero.
	 */
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			return 0;
	,
		return 1;
	);

	/* Slow path: recheck the count under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	/*
	 * Fast path: increment via cmpxchg; on a zero count, break out
	 * to take the lock instead.
	 */
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			break;
	,
		return 1;
	);

	/*
	 * Slow path.  Note the contract: on a zero count we return 0
	 * WITH the spinlock still held - the caller must release it.
	 */
	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	/*
	 * Fast path: decrement via cmpxchg; if this would drop the count
	 * to zero (or below), break out to take the lock instead so the
	 * caller can handle the last reference.
	 */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	/*
	 * Slow path.  Note the contract: when count <= 1 we return 0
	 * WITH the spinlock still held - the caller must release it.
	 */
	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);