#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 *  linux/include/asm-m32r/spinlock.h
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/page.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

#define __raw_spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)
#define __raw_spin_lock_flags(lock, flags)	__raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
		do { cpu_relax(); } while (__raw_spin_is_locked(x))

/**
 * __raw_spin_trylock - Try spin lock and return a result
 * @lock: Pointer to the lock variable
 *
 * __raw_spin_trylock() tries to get the lock and returns a result.
 * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
 */
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 * {
	 *   oldval = lock->slock; <--+ need atomic operation
	 *   lock->slock = 0;      <--+
	 * }
	 */
	__asm__ __volatile__ (
		"# __raw_spin_trylock		\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}
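
/*
 * Caller-side sketch (illustrative only; "my_lock" is a hypothetical
 * raw_spinlock_t, not something defined in this header):
 *
 *	if (__raw_spin_trylock(&my_lock)) {
 *		... critical section ...
 *		__raw_spin_unlock(&my_lock);
 *	}
 */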

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   lock->slock -= 1;  <-- need atomic operation
	 *   if (lock->slock == 0) break;
	 *   for ( ; lock->slock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# __raw_spin_lock		\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	2b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
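
/*
 * Unlock needs no atomic read-modify-write: only the current owner
 * stores here.  The mb() keeps the critical section's memory accesses
 * from being reordered past the releasing store.
 */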
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	mb();
	lock->slock = 1;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * As on x86, read-write locks are implemented as a 32-bit
 * counter, with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
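
/*
 * A worked sketch of the bias arithmetic (illustrative; the real
 * RW_LOCK_BIAS value comes from asm/spinlock_types.h):
 *
 *	lock == RW_LOCK_BIAS     : unlocked
 *	lock == RW_LOCK_BIAS - n : held by n readers
 *	lock == 0                : held by one writer
 *	lock <  0                : transient - a reader decremented the
 *	                           count while a writer holds the lock,
 *	                           and adds its 1 back before spinning
 */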

/**
 * __raw_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(x)	((int)(x)->lock > 0)

/**
 * __raw_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= 1;  <-- need atomic operation
	 *   if (rw->lock >= 0) break;
	 *   rw->lock += 1;  <-- need atomic operation
	 *   for ( ; rw->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	/*
	 * rw->lock :  =RW_LOCK_BIAS : unlock
	 *          : !=RW_LOCK_BIAS : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= RW_LOCK_BIAS;  <-- need atomic operation
	 *   if (rw->lock == 0) break;
	 *   rw->lock += RW_LOCK_BIAS;  <-- need atomic operation
	 *   for ( ; rw->lock != RW_LOCK_BIAS ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# write_lock			\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"sub	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		"bnez	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"add	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%3;		\n\t"
		"beq	%0, %1, 1b;		\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	__asm__ __volatile__ (
		"# write_unlock			\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"add	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
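
/*
 * The trylock variants below speculatively apply the lock operation
 * and back it out on failure.  A consequence of this bias scheme is
 * that a concurrent trylock may fail spuriously while the count is
 * transiently lowered by another CPU's attempt.
 */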
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
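
/*
 * Note that the (atomic_t *) casts above assume the rwlock counter is
 * the first member of raw_rwlock_t with an atomic_t-compatible layout,
 * which this port's spinlock_types.h is expected to satisfy.
 */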

#define __raw_read_lock_flags(lock, flags)	__raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags)	__raw_write_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif	/* _ASM_M32R_SPINLOCK_H */