mutex_32.h

/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#include <asm/alternative.h>
/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
#define __mutex_fastpath_lock(count, fail_fn)			\
do {								\
	unsigned int dummy;					\
								\
	typecheck(atomic_t *, count);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	__asm__ __volatile__(					\
		LOCK_PREFIX " decl (%%eax)	\n"		\
		"	jns 1f			\n"		\
		"	call " #fail_fn "	\n"		\
		"1:				\n"		\
								\
		: "=a" (dummy)					\
		: "a" (count)					\
		: "memory", "ecx", "edx");			\
} while (0)
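
/*
 * Illustration only (not part of the original header): a plain-C sketch of
 * what the asm fastpath above does, using a hypothetical helper name. The
 * real macro stays in assembly so that the slowpath call receives @count in
 * %eax and clobbers only %ecx/%edx:
 *
 *	static inline void __mutex_fastpath_lock_sketch(atomic_t *count,
 *					void (*fail_fn)(atomic_t *))
 *	{
 *		if (atomic_dec_return(count) < 0)	// count wasn't 1
 *			fail_fn(count);			// take the slowpath
 *	}
 */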
/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else
		return 0;
}
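
/*
 * Illustration only: a sketch of how a caller might use the retval
 * fastpath. 'lock' and 'lock_slowpath' are hypothetical names, assuming a
 * mutex with an atomic_t count field:
 *
 *	ret = __mutex_fastpath_lock_retval(&lock->count, lock_slowpath);
 *	// ret == 0: acquired on the fastpath; otherwise ret is whatever
 *	// lock_slowpath() returned (e.g. -EINTR for interruptible waits).
 */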
/**
 * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value
 * to 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
 * it needs to return 0.
 */
#define __mutex_fastpath_unlock(count, fail_fn)			\
do {								\
	unsigned int dummy;					\
								\
	typecheck(atomic_t *, count);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	__asm__ __volatile__(					\
		LOCK_PREFIX " incl (%%eax)	\n"		\
		"	jg 1f			\n"		\
		"	call " #fail_fn "	\n"		\
		"1:				\n"		\
								\
		: "=a" (dummy)					\
		: "a" (count)					\
		: "memory", "ecx", "edx");			\
} while (0)
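
/*
 * Illustration only: a plain-C sketch of the unlock fastpath above.
 * atomic_inc_return() yields the post-increment value, so a result that is
 * not positive corresponds to the "jg 1f" branch not being taken:
 *
 *	static inline void __mutex_fastpath_unlock_sketch(atomic_t *count,
 *					void (*fail_fn)(atomic_t *))
 *	{
 *		if (atomic_inc_return(count) <= 0)	// waiters queued
 *			fail_fn(count);			// wake one up
 *	}
 */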
#define __mutex_slowpath_needs_to_unlock()	1
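/*
 * The unlock fastpath above uses incl, so in the failure case it can leave
 * the count at a value lower than 1 (e.g. 0) rather than setting it to 1.
 * Per the rule documented at __mutex_fastpath_unlock(), the slowpath must
 * then perform the unlock itself, hence the macro above returns 1.
 */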
/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not set
 * it to 0 on failure.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induces a false contention state. It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not, we fall back to the spinlock based variant - that is
	 * just as efficient as (and simpler than) a 'destructive' probing
	 * of the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	return 0;
#else
	return fail_fn(count);
#endif
}
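
/*
 * Illustration only: a hypothetical trylock caller (names assumed, not
 * defined in this header). On success the count has atomically moved from
 * 1 to 0; on failure the mutex state is left to the fallback function:
 *
 *	if (__mutex_fastpath_trylock(&lock->count, trylock_slowpath))
 *		;	// acquired without blocking
 *	else
 *		;	// mutex was already held, nothing acquired
 */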
#endif /* _ASM_MUTEX_H */