/*
 * arch/sh/include/asm/mutex-llsc.h
 *
 * SH-4A optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef __ASM_SH_MUTEX_LLSC_H
#define __ASM_SH_MUTEX_LLSC_H

/*
 * Attempting to lock a mutex on SH-4A is done as on ARMv6+ architectures,
 * with a bastardized atomic decrement (it is not a reliable atomic decrement,
 * but it satisfies the defined semantics for our purpose, while being
 * smaller and faster than a real atomic decrement or atomic swap).
 * The idea is to attempt decrementing the lock value only once. If, once
 * decremented, it isn't zero, or if its store-back fails due to a dispute
 * on the exclusive store, we simply bail out immediately through the slow
 * path, where the lock will be retried until it succeeds.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
        int __ex_flag, __res;

        __asm__ __volatile__ (
                "movli.l        @%2, %0 \n"     /* load-linked count */
                "add            #-1, %0 \n"     /* decrement it */
                "movco.l        %0, @%2 \n"     /* conditional store-back */
                "movt           %1      \n"     /* T bit: 1 if store succeeded */
                : "=&z" (__res), "=&r" (__ex_flag)
                : "r" (&(count)->counter)
                : "t");

        __res |= !__ex_flag;                    /* a failed store counts as contention */
        if (unlikely(__res != 0))
                fail_fn(count);
}

static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        int __ex_flag, __res;

        __asm__ __volatile__ (
                "movli.l        @%2, %0 \n"
                "add            #-1, %0 \n"
                "movco.l        %0, @%2 \n"
                "movt           %1      \n"
                : "=&z" (__res), "=&r" (__ex_flag)
                : "r" (&(count)->counter)
                : "t");

        __res |= !__ex_flag;
        if (unlikely(__res != 0))
                __res = fail_fn(count);

        return __res;
}

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
        int __ex_flag, __res;

        __asm__ __volatile__ (
                "movli.l        @%2, %0 \n\t"
                "add            #1, %0  \n\t"
                "movco.l        %0, @%2 \n\t"
                "movt           %1      \n\t"
                : "=&z" (__res), "=&r" (__ex_flag)
                : "r" (&(count)->counter)
                : "t");

        __res |= !__ex_flag;
        if (unlikely(__res <= 0))
                fail_fn(count);
}

/*
 * If the unlock was done on a contended lock, or if the unlock simply fails,
 * then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()      1

/*
 * For __mutex_fastpath_trylock we do an atomic decrement, check the
 * result, and place it in __res.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        int __res, __orig;

        __asm__ __volatile__ (
                "1: movli.l     @%2, %0         \n\t"   /* load-linked count */
                "dt             %0              \n\t"   /* decrement it */
                "movco.l        %0, @%2         \n\t"   /* try the store-back */
                "bf             1b              \n\t"   /* retry if the store failed */
                "cmp/eq         #0, %0          \n\t"   /* did we take the lock? */
                "bt             2f              \n\t"
                "mov            #0, %1          \n\t"   /* no: return 0 */
                "bf             3f              \n\t"
                "2: mov         #1, %1          \n\t"   /* yes: return 1 */
                "3:                             "
                : "=&z" (__orig), "=&r" (__res)
                : "r" (&count->counter)
                : "t");

        return __res;
}

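/*
 * Usage sketch (not defined in this header): these fastpath helpers are
 * invoked by the generic mutex layer, which supplies the slowpath handler
 * as fail_fn. Assuming the pre-4.10 kernel/mutex.c interface, a caller
 * looks roughly like the snippet below; the names are illustrative and
 * come from the generic code, not from this file.
 *
 *	void __sched mutex_lock(struct mutex *lock)
 *	{
 *		might_sleep();
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *	}
 *
 * A failed movco.l leaves the count untouched and merely diverts that one
 * caller into the slowpath, where the lock is retried.
 */
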
#endif /* __ASM_SH_MUTEX_LLSC_H */