spinlock.h

#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 *  linux/include/asm-m32r/spinlock.h
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>	/* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/page.h>

extern int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));

#define RW_LOCK_BIAS		 0x01000000
#define RW_LOCK_BIAS_STR	"0x01000000"
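
/*
 * rwlock counter encoding (see the read/write lock code below):
 * the counter starts at RW_LOCK_BIAS when unlocked.  Each reader
 * subtracts 1; a writer subtracts the whole bias, so the lock is
 * free for a writer only while the counter still equals the bias.
 */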

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile int slock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC	0xdead4ead

#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
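
/*
 * slock encoding: 1 means unlocked, <= 0 means locked (see the
 * comments inside the lock routines below), which is why
 * spin_is_locked() tests for slock <= 0.
 *
 * A minimal usage sketch (hypothetical caller, not part of this
 * header) -- code normally goes through the generic wrappers, using
 * the _irqsave variant whenever the lock is also taken from IRQ
 * context:
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */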

#define spin_is_locked(x)	(*(volatile int *)(&(x)->slock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags)	_raw_spin_lock(lock)

/**
 * _raw_spin_trylock - Try spin lock and return a result
 * @lock: Pointer to the lock variable
 *
 * _raw_spin_trylock() tries to get the lock and returns a result.
 * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
 */
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 * {
	 *   oldval = lock->slock;  <--+  need atomic operation
	 *   lock->slock = 0;       <--+
	 * }
	 */
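	/*
	 * The m32r LOCK/UNLOCK instruction pair makes the load and the
	 * store below a single atomic read-modify-write; interrupts are
	 * disabled around it by clearing the IE bit (0x40) in the PSW
	 * (mvfc saves the old PSW, mvtc restores it).  DCACHE_CLEAR is
	 * a workaround macro for the M32700 TS1 chip and expands to
	 * nothing on other chips -- hence the conditional "r6" clobber.
	 */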
	__asm__ __volatile__ (
		"# spin_trylock			\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}

static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

#ifdef CONFIG_DEBUG_SPINLOCK
	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
		printk("pc: %p\n", __builtin_return_address(0));
		BUG();
	}
#endif
	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   lock->slock -= 1;  <-- need atomic operation
	 *   if (lock->slock == 0) break;
	 *   for ( ; lock->slock <= 0 ; );
	 * }
	 */
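	/*
	 * Fast path: atomically decrement slock and return if the result
	 * is not negative.  The contended path lives in an out-of-line
	 * section (LOCK_SECTION_START/END) and polls with plain loads --
	 * no locked bus cycles -- until slock goes positive again, then
	 * jumps back to retry the decrement.
	 */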
	__asm__ __volatile__ (
		"# spin_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	2b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
	mb();			/* order critical-section accesses before the release */
	lock->slock = 1;	/* plain store suffices: only the owner writes 1 */
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks -- any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
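
/*
 * A minimal sketch of that mixed pattern (hypothetical names, not part
 * of this header): readers may run in interrupt context with a plain
 * read_lock(), as long as every writer disables interrupts while
 * holding the write lock.
 *
 *	static rwlock_t tbl_lock = RW_LOCK_UNLOCKED;
 *
 *	interrupt context, reader only:
 *		read_lock(&tbl_lock);
 *		... lookup ...
 *		read_unlock(&tbl_lock);
 *
 *	process context, writer:
 *		unsigned long flags;
 *		write_lock_irqsave(&tbl_lock, flags);
 *		... update ...
 *		write_unlock_irqrestore(&tbl_lock, flags);
 */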

typedef struct {
	volatile int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RWLOCK_MAGIC	0xdeaf1eed

#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif

#define RW_LOCK_UNLOCKED	(rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define read_can_lock(x)	((int)(x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define write_can_lock(x)	((x)->lock == RW_LOCK_BIAS)

/*
 * As on x86, read-write locks are implemented as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious.  Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 *
 * (This comment block was inherited from the i386 implementation;
 * on m32r the lock helpers below are fully inline, with no
 * out-of-line helpers in arch code.)
 */
static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= 1;  <-- need atomic operation
	 *   if (rw->lock >= 0) break;
	 *   rw->lock += 1;  <-- need atomic operation
	 *   for ( ; rw->lock <= 0 ; );
	 * }
	 */
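	/*
	 * Fast path: atomically take a reader's share by decrementing
	 * the counter; a non-negative result means success.  On
	 * contention the out-of-line path first undoes the decrement,
	 * then spins with plain loads until the counter goes positive
	 * before retrying, so waiting readers hold no locked bus cycles.
	 */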
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void _raw_write_lock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	/*
	 * rw->lock :  =RW_LOCK_BIAS : unlock
	 *          : !=RW_LOCK_BIAS : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= RW_LOCK_BIAS;  <-- need atomic operation
	 *   if (rw->lock == 0) break;
	 *   rw->lock += RW_LOCK_BIAS;  <-- need atomic operation
	 *   for ( ; rw->lock != RW_LOCK_BIAS ; ) ;
	 * }
	 */
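	/*
	 * The writer subtracts the whole bias: the result is zero only
	 * if the counter was exactly RW_LOCK_BIAS, i.e. no readers and
	 * no writer held the lock.  Otherwise the out-of-line path adds
	 * the bias back and spins with plain loads until the counter
	 * reads as the bias again, then retries.
	 */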
	__asm__ __volatile__ (
		"# write_lock			\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"sub	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		"bnez	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"add	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%3;		\n\t"
		"beq	%0, %1, 1b;		\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	/* atomically increment the counter to drop this reader's share */
	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	/* atomically add the full bias back to release the write lock */
	__asm__ __volatile__ (
		"# write_unlock			\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"add	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

#define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)
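
/*
 * Write trylock via the bias trick: atomic_sub_and_test() succeeds
 * (result == 0) only if the counter held exactly RW_LOCK_BIAS, i.e.
 * the lock was completely free; on failure the bias is added back so
 * waiting readers and writers see a consistent count.
 */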
static inline int _raw_write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;

	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

#endif	/* _ASM_M32R_SPINLOCK_H */