mutex_64.h
/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_X86_MUTEX_64_H
#define _ASM_X86_MUTEX_64_H

/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 */
#ifdef CC_HAVE_ASM_GOTO
static inline void __mutex_fastpath_lock(atomic_t *v,
					 void (*fail_fn)(atomic_t *))
{
	asm volatile goto(LOCK_PREFIX " decl %0\n"
			  " jns %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
			  : exit);
	fail_fn(v);
exit:
	return;
}
#else
#define __mutex_fastpath_lock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX " decl (%%rdi)\n"		\
		     " jns 1f\n"				\
		     " call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
#endif

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to 0
 * @count: pointer of type atomic_t
 *
 * Change the count from 1 to a value lower than 1. This function returns 0
 * if the fastpath succeeds, or -1 otherwise.
 */
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(atomic_dec_return(count) < 0))
		return -1;
	else
		return 0;
}
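
/*
 * Illustration only, not part of the original header: a minimal sketch of
 * how a caller might pair the retval fastpath with a slowpath that itself
 * returns an error code. example_lock_common() and its slowpath parameter
 * are hypothetical names; real callers live in the generic mutex code.
 */
static inline int example_lock_common(atomic_t *count,
				      int (*slowpath)(atomic_t *))
{
	if (__mutex_fastpath_lock_retval(count) < 0)
		return slowpath(count);	/* contended: take the slow road */
	return 0;			/* uncontended: lock acquired */
}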

/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 */
#ifdef CC_HAVE_ASM_GOTO
static inline void __mutex_fastpath_unlock(atomic_t *v,
					   void (*fail_fn)(atomic_t *))
{
	asm volatile goto(LOCK_PREFIX " incl %0\n"
			  " jg %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
			  : exit);
	fail_fn(v);
exit:
	return;
}
#else
#define __mutex_fastpath_unlock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX " incl (%%rdi)\n"		\
		     " jg 1f\n"					\
		     " call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
#endif

#define __mutex_slowpath_needs_to_unlock()	1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
 * if it wasn't 1 originally. [the fallback function is never used on
 * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
 */
static inline int __mutex_fastpath_trylock(atomic_t *count,
					   int (*fail_fn)(atomic_t *))
{
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	else
		return 0;
}
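
/*
 * Illustration only, not part of the original header: a hedged sketch of how
 * the lock/unlock/trylock fastpaths above are meant to be combined with
 * slowpath helpers. example_lock_slowpath() and example_unlock_slowpath()
 * are hypothetical stand-ins for the slowpath functions that the generic
 * mutex code would normally supply.
 */
extern void example_lock_slowpath(atomic_t *count);
extern void example_unlock_slowpath(atomic_t *count);

static inline void example_mutex_lock(atomic_t *count)
{
	/* LOCK DECL; the slowpath runs only if the count went negative. */
	__mutex_fastpath_lock(count, example_lock_slowpath);
}

static inline void example_mutex_unlock(atomic_t *count)
{
	/* LOCK INCL; the slowpath runs only if waiters may be pending. */
	__mutex_fastpath_unlock(count, example_unlock_slowpath);
}

static inline int example_mutex_trylock(atomic_t *count)
{
	/* CMPXCHG 1 -> 0; the fallback argument is unused on x86-64. */
	return __mutex_fastpath_trylock(count, NULL);
}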
#endif /* _ASM_X86_MUTEX_64_H */