mutex.h

/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
#define __mutex_fastpath_lock(count, fail_fn) \
do { \
	unsigned int dummy; \
 \
	typecheck(atomic_t *, count); \
	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
 \
	__asm__ __volatile__( \
		LOCK	"   decl (%%eax)	\n" \
			"   js 2f		\n" \
			"1:			\n" \
 \
		LOCK_SECTION_START("") \
			"2: call "#fail_fn"	\n" \
			"   jmp 1b		\n" \
		LOCK_SECTION_END \
 \
		:"=a" (dummy) \
		: "a" (count) \
		: "memory", "ecx", "edx"); \
} while (0)
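
/*
 * For reference, the fastpath above behaves like the following C sketch
 * built on atomic_dec_return(). This is only an illustration (the
 * function name is made up, it is not part of this header):
 *
 *	static inline void
 *	example_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 *	{
 *		if (unlikely(atomic_dec_return(count) < 0))
 *			fail_fn(count);
 *	}
 *
 * The asm version keeps the fail_fn call in an out-of-line section
 * (LOCK_SECTION_START/END), so the hot path is a single locked decl
 * plus a conditional jump.
 */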

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count,
			     int fastcall (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else
		return 0;
}
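
/*
 * A typical caller pattern (illustrative sketch only; "lock_slowpath_fn"
 * stands for whatever slowpath the generic mutex code passes in, it is
 * not defined here):
 *
 *	ret = __mutex_fastpath_lock_retval(&lock->count, lock_slowpath_fn);
 *
 * ret is 0 when the fastpath took the lock; otherwise it is whatever
 * lock_slowpath_fn returned (for example -EINTR if an interruptible
 * slowpath sleep was interrupted).
 */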

/**
 * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value
 * to 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
 * it needs to return 0.
 */
#define __mutex_fastpath_unlock(count, fail_fn) \
do { \
	unsigned int dummy; \
 \
	typecheck(atomic_t *, count); \
	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
 \
	__asm__ __volatile__( \
		LOCK	"   incl (%%eax)	\n" \
			"   jle 2f		\n" \
			"1:			\n" \
 \
		LOCK_SECTION_START("") \
			"2: call "#fail_fn"	\n" \
			"   jmp 1b		\n" \
		LOCK_SECTION_END \
 \
		:"=a" (dummy) \
		: "a" (count) \
		: "memory", "ecx", "edx"); \
} while (0)
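
/*
 * As with the lock fastpath, the unlock fastpath above is roughly
 * equivalent to this C sketch based on atomic_inc_return() (again only
 * an illustration, the name is made up):
 *
 *	static inline void
 *	example_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 *	{
 *		if (unlikely(atomic_inc_return(count) <= 0))
 *			fail_fn(count);
 *	}
 *
 * i.e. the wakeup slowpath is only entered when the count did not come
 * back to exactly 1, which means there are (or may be) waiters.
 */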

#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not leave
 * it at 0 on failure.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induces a false contention state. It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not, we fall back to the spinlock based variant - that is
	 * just as efficient as (and simpler than) a 'destructive' probing of
	 * the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	return 0;
#else
	return fail_fn(count);
#endif
}
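
/*
 * The spinlock-based fallback mentioned in the comment above lives in
 * the generic mutex code, not in this header. Conceptually it does
 * something like the following hedged sketch (field and function names
 * are illustrative, this is not the actual kernel implementation):
 *
 *	static int example_trylock_slowpath(atomic_t *count)
 *	{
 *		struct mutex *lock = container_of(count, struct mutex, count);
 *		int prev;
 *
 *		spin_lock(&lock->wait_lock);
 *		prev = atomic_xchg(count, -1);		// destructive probe
 *		if (list_empty(&lock->wait_list))
 *			atomic_set(count, 0);		// no waiters: plain "locked"
 *		spin_unlock(&lock->wait_lock);
 *
 *		return prev == 1;			// 1: we took the lock
 *	}
 *
 * The xchg "destroys" the old count value, which is why the comment
 * above prefers the non-destructive cmpxchg variant when it is
 * available.
 */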

#endif