spinlock.h
/*
 * Spinlock support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/irqflags.h>

/*
 * This file is pulled in for SMP builds.
 * Really need to check all the barrier stuff for "true" SMP
 */

/*
 * Read locks:
 * - load the lock value
 * - increment it
 * - if the lock value is still negative (a writer holds it), go back and
 *   try again
 * - if the conditional store fails, go back and try again
 * - if the store of the new (positive) value succeeds, the lock is acquired
 */
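
/*
 * Rough C-style sketch of the load-locked/store-conditional loop below,
 * for readers unfamiliar with Hexagon assembly. Illustrative only:
 * load_locked() and store_conditional() are not real functions, just
 * stand-ins for the memw_locked load/store pair used in the asm.
 *
 *	do {
 *		old = load_locked(&lock->lock);
 *	} while (old < 0 ||			// a writer holds the lock
 *		 !store_conditional(&lock->lock, old + 1));
 */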
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	{ P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		"	{ if !P3 jump 1b; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}

static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	R6 = add(R6,#-1);\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	if !P3 jump 1b;\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}

/* Returns 0 on failure, 1 on success. */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	{ %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		"	{ if !P3 jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	{ %0 = P3 }\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}

static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}

static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}

/* Write lock: stuffs a -1 into the lock value to mark it held exclusively. */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	{ P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
		"	{ if !P3 jump 1b; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	{ %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
		"	{ if !P3 jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		"	{ if !P3 jump 1b; R6 = #1; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}

static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		"	{ if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}
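
/*
 * Illustrative C-style sketch of the trylock above; compare_and_swap()
 * is a stand-in, not a real function, for the memw_locked load followed
 * by the predicated store:
 *
 *	if (lock->lock == 0 && compare_and_swap(&lock->lock, 0, 1))
 *		return 1;	// acquired
 *	return 0;		// held by someone else, give up immediately
 */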

/*
 * SMP spinlocks are intended to allow only a single CPU at the lock
 */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(lock) \
	do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#define arch_spin_is_locked(x) ((x)->lock != 0)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
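
/*
 * Typical usage sketch (not part of this header): code never calls the
 * arch_* routines directly, but goes through the generic wrappers from
 * <linux/spinlock.h>, roughly:
 *
 *	static DEFINE_SPINLOCK(my_lock);	// "my_lock" is just an example name
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */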

#endif