/* lockref.h */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
struct lockref {
	spinlock_t lock;	/* protects 'count' (and, by convention, the refcounted object) */
	unsigned int count;	/* reference count; updated under 'lock' in this implementation */
};
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
static inline void lockref_get(struct lockref *lockref)
{
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
  33. /**
  34. * lockref_get_not_zero - Increments count unless the count is 0
  35. * @lockcnt: pointer to lockref structure
  36. * Return: 1 if count updated successfully or 0 if count is 0
  37. */
  38. static inline int lockref_get_not_zero(struct lockref *lockref)
  39. {
  40. int retval = 0;
  41. spin_lock(&lockref->lock);
  42. if (lockref->count) {
  43. lockref->count++;
  44. retval = 1;
  45. }
  46. spin_unlock(&lockref->lock);
  47. return retval;
  48. }
/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 *
 * NOTE: on a zero return the spinlock is still held; the caller is
 * responsible for dropping it.  On a non-zero return the lock has
 * already been released.
 */
static inline int lockref_get_or_lock(struct lockref *lockref)
{
	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;	/* count was zero: return with the lock held */
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
  64. /**
  65. * lockref_put_or_lock - decrements count unless count <= 1 before decrement
  66. * @lockcnt: pointer to lockref structure
  67. * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
  68. */
  69. static inline int lockref_put_or_lock(struct lockref *lockref)
  70. {
  71. spin_lock(&lockref->lock);
  72. if (lockref->count <= 1)
  73. return 0;
  74. lockref->count--;
  75. spin_unlock(&lockref->lock);
  76. return 1;
  77. }
#endif /* __LINUX_LOCKREF_H */