mutex.h

/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 */
#define __mutex_fastpath_lock(v, fail_fn)                               \
do {                                                                    \
        unsigned long dummy;                                            \
                                                                        \
        typecheck(atomic_t *, v);                                       \
        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);           \
                                                                        \
        __asm__ __volatile__(                                           \
                LOCK "   decl (%%rdi)   \n"                             \
                     "   js 2f          \n"                             \
                     "1:                \n"                             \
                                                                        \
                LOCK_SECTION_START("")                                  \
                     "2: call "#fail_fn" \n"                            \
                     "   jmp 1b         \n"                             \
                LOCK_SECTION_END                                        \
                                                                        \
                : "=D" (dummy)                                          \
                : "D" (v)                                               \
                : "rax", "rsi", "rdx", "rcx",                           \
                  "r8", "r9", "r10", "r11", "memory");                  \
} while (0)
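
/*
 * Usage sketch (illustrative commentary, not part of the original header):
 * the generic mutex layer in kernel/mutex.c is expected to invoke this
 * fastpath roughly as
 *
 *      __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *
 * The asm calls <fail_fn> directly, so the count pointer must already sit in
 * %rdi, the first argument register of the x86-64 SysV ABI; that is what the
 * "D" constraint arranges. Every other caller-saved register is listed as
 * clobbered because the call may trash it. The "2:" block is emitted into a
 * separate section via LOCK_SECTION_START so the uncontended path stays
 * short, branch-free and cache-hot.
 */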

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count,
                             int fastcall (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_dec_return(count) < 0))
                return fail_fn(count);
        else
                return 0;
}
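
/*
 * Usage sketch (illustrative assumption, not in the original header): this
 * retval variant is meant for interruptible acquisition, where the slowpath
 * may fail rather than block forever, roughly:
 *
 *      return __mutex_fastpath_lock_retval(&lock->count,
 *                                          __mutex_lock_interruptible_slowpath);
 *
 * The fastpath returns 0 on an uncontended 1 -> 0 transition; otherwise the
 * caller gets whatever the slowpath returns, for instance -EINTR if the
 * sleeping task was interrupted by a signal.
 */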

/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 */
#define __mutex_fastpath_unlock(v, fail_fn)                             \
do {                                                                    \
        unsigned long dummy;                                            \
                                                                        \
        typecheck(atomic_t *, v);                                       \
        typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);           \
                                                                        \
        __asm__ __volatile__(                                           \
                LOCK "   incl (%%rdi)   \n"                             \
                     "   jle 2f         \n"                             \
                     "1:                \n"                             \
                                                                        \
                LOCK_SECTION_START("")                                  \
                     "2: call "#fail_fn" \n"                            \
                     "   jmp 1b         \n"                             \
                LOCK_SECTION_END                                        \
                                                                        \
                : "=D" (dummy)                                          \
                : "D" (v)                                               \
                : "rax", "rsi", "rdx", "rcx",                           \
                  "r8", "r9", "r10", "r11", "memory");                  \
} while (0)
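
/*
 * Added commentary (not from the original source): the unlock fastpath is
 * the 0 -> 1 transition. If the incremented result is still nonpositive, the
 * count was negative, i.e. there are (or were) waiters, so the "jle 2f"
 * branch diverts into the out-of-line block and calls <fail_fn> to wake one
 * of them up. The generic layer is expected to use it roughly as
 *
 *      __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 */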

#define __mutex_slowpath_needs_to_unlock()      1
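
/*
 * Added note (assumption about the generic layer, not stated in this header):
 * returning 1 tells the generic unlock slowpath that this architecture's
 * fastpath leaves the mutex locked on the contended path, so the slowpath
 * itself must still release it, e.g. with atomic_set(&lock->count, 1),
 * before waking up waiters.
 */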

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
 * if it wasn't 1 originally. [the fallback function is never used on
 * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        if (likely(atomic_cmpxchg(count, 1, 0) == 1))
                return 1;
        else
                return 0;
}
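
/*
 * Usage sketch (illustrative, not part of the original header): the generic
 * mutex_trylock() is expected to call this roughly as
 *
 *      return __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
 *
 * The cmpxchg either moves the count from 1 (unlocked) to 0 (locked) and
 * reports success, or leaves it untouched and reports failure; <fail_fn> is
 * only a fallback for architectures without a usable cmpxchg, so it goes
 * unused here.
 */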

#endif