/*
 * include/asm-arm/mutex.h
 *
 * ARM optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#if __LINUX_ARM_ARCH__ < 6
/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
# include <asm-generic/mutex-xchg.h>
#else

/*
 * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
 * atomic decrement (it is not a reliable atomic decrement, but it satisfies
 * the defined semantics for our purpose while being smaller and faster
 * than a real atomic decrement or atomic swap).  The idea is to attempt
 * the decrement only once: if the decremented value isn't zero, or if the
 * store-back fails due to a dispute on the exclusive store, we simply bail
 * out immediately through the slow path, where the lock will be reattempted
 * until it succeeds.
 */
#define __mutex_fastpath_lock(count, fail_fn) \
do { \
        int __ex_flag, __res; \
\
        typecheck(atomic_t *, count); \
        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
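        /* Operands: %0 = __res (decremented count), %1 = __ex_flag   */ \
        /* (0 if the exclusive store succeeded), %2 = &count->counter. */ \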
        __asm__ ( \
                "ldrex   %0, [%2]        \n" \
                "sub     %0, %0, #1      \n" \
                "strex   %1, %0, [%2]    \n" \
\
                : "=&r" (__res), "=&r" (__ex_flag) \
                : "r" (&(count)->counter) \
                : "cc", "memory"); \
\
        if (unlikely(__res || __ex_flag)) \
                fail_fn(count); \
} while (0)
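
/*
 * Same attempt as above, but this macro evaluates to a return value:
 * 0 when the lock is taken on the fast path, otherwise whatever the
 * slow path fail_fn() returns (needed by callers such as
 * mutex_lock_interruptible()).
 */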
#define __mutex_fastpath_lock_retval(count, fail_fn) \
({ \
        int __ex_flag, __res; \
\
        typecheck(atomic_t *, count); \
        typecheck_fn(fastcall int (*)(atomic_t *), fail_fn); \
\
        __asm__ ( \
                "ldrex   %0, [%2]        \n" \
                "sub     %0, %0, #1      \n" \
                "strex   %1, %0, [%2]    \n" \
\
                : "=&r" (__res), "=&r" (__ex_flag) \
                : "r" (&(count)->counter) \
                : "cc", "memory"); \
\
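        /* treat a nonzero result or a failed exclusive store as contention */ \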
        __res |= __ex_flag; \
        if (unlikely(__res != 0)) \
                __res = fail_fn(count); \
        __res; \
})

/*
 * The same trick is used for the unlock fast path.  However, the original
 * value rather than the result is used to test for success, in order to
 * have better generated assembly.
 */
#define __mutex_fastpath_unlock(count, fail_fn) \
do { \
        int __ex_flag, __res, __orig; \
\
        typecheck(atomic_t *, count); \
        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \
\
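        /* Operands: %0 = __orig (old count), %1 = __res (old count + 1), */ \
        /* %2 = __ex_flag (0 if the exclusive store succeeded),           */ \
        /* %3 = &count->counter.                                          */ \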
        __asm__ ( \
                "ldrex   %0, [%3]        \n" \
                "add     %1, %0, #1      \n" \
                "strex   %2, %1, [%3]    \n" \
\
                : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag) \
                : "r" (&(count)->counter) \
                : "cc", "memory"); \
\
        if (unlikely(__orig || __ex_flag)) \
                fail_fn(count); \
} while (0)

/*
 * If the unlock was done on a contended lock, or if the unlock simply fails
 * then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()      1

/*
 * For __mutex_fastpath_trylock we use another construct which could be
 * described as a "single value cmpxchg".
 *
 * This provides the needed trylock semantics like cmpxchg would, but it is
 * lighter and less generic than a true cmpxchg implementation.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        int __ex_flag, __res, __orig;

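        /*
         * Decrement the count only when it is exactly 1, retrying if the
         * exclusive store fails.  __orig holds the previous count and is
         * also the return value: 1 means the lock was acquired, anything
         * else means it was not and the count was left untouched, so
         * fail_fn is never needed here.
         */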
        __asm__ (
                "1:     ldrex   %0, [%3]        \n"
                "       subs    %1, %0, #1      \n"
                "       strexeq %2, %1, [%3]    \n"
                "       movlt   %0, #0          \n"
                "       cmpeq   %2, #0          \n"
                "       bgt     1b              \n"
                : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
                : "r" (&count->counter)
                : "cc", "memory");

        return __orig;
}

#endif
#endif