#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 * linux/include/asm-m32r/bitops.h
 *
 * Copyright 1992, Linus Torvalds.
 *
 * M32R version:
 * Copyright (C) 2001, 2002 Hitoshi Yamamoto
 * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/assembler.h>
#include <asm/byteorder.h>
#include <asm/dcache_clear.h>
#include <asm/types.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
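
/*
 * Illustrative sketch (not part of the original header; demo_word and
 * demo_decompose are hypothetical names): how a bit number splits into
 * a word offset and an in-word mask, exactly as every function below
 * computes it. For nr = 37, nr >> 5 == 1 selects the second 32-bit
 * word, and 1 << (37 & 0x1F) == 1 << 5 is the mask within that word.
 */
#if 0	/* usage sketch only, never compiled */
static __u32 demo_word[2];	/* 64 bits of flag space */

static void demo_decompose(int nr)
{
	volatile __u32 *word = (volatile __u32 *)demo_word + (nr >> 5);
	__u32 mask = 1 << (nr & 0x1F);

	*word |= mask;	/* the non-atomic equivalent of set_bit(nr, demo_word) */
}
#endif
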
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"or %0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
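
/*
 * Usage sketch (illustrative; dev_flags and RX_READY are hypothetical
 * names, not part of this header): setting a flag from an interrupt
 * handler. The atomicity of set_bit() matters because other contexts
 * may be updating neighbouring bits of the same word concurrently.
 */
#if 0	/* usage sketch only, never compiled */
static unsigned long dev_flags;
#define RX_READY	3

static void rx_irq_handler(void)
{
	set_bit(RX_READY, &dev_flags);	/* atomic read-modify-write */
}
#endif
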
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"and %0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (~mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
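
/*
 * Usage sketch (my_lock_word and MY_LOCK_BIT are hypothetical): releasing
 * a lock bit. As the clear_bit() comment above notes, the operation has
 * no implicit barrier, so the release path must order the critical-section
 * stores explicitly before dropping the bit.
 */
#if 0	/* usage sketch only, never compiled */
static unsigned long my_lock_word;
#define MY_LOCK_BIT	0

static void my_unlock(void)
{
	smp_mb__before_clear_bit();	/* make prior stores visible first */
	clear_bit(MY_LOCK_BIT, &my_lock_word);
}
#endif
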
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"xor %0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
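
/*
 * Usage sketch (led_state and LED0 are hypothetical): flipping a bit
 * shared with another context. change_bit() performs the load-xor-store
 * atomically, so concurrent updates to other bits of led_state are safe.
 */
#if 0	/* usage sketch only, never compiled */
static unsigned long led_state;
#define LED0	0

static void led_toggle(void)
{
	change_bit(LED0, &led_state);
}
#endif
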
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv %1, %0;		\n\t"
		"and %0, %3;		\n\t"
		"or %1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
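
/*
 * Usage sketch (once_flags, INIT_DONE and do_expensive_init() are
 * hypothetical): test_and_set_bit() as a claim-once primitive. Exactly
 * one caller observes the old value 0 and runs the initialization;
 * every other caller sees the bit already set and skips it.
 */
#if 0	/* usage sketch only, never compiled */
static unsigned long once_flags;
#define INIT_DONE	0

static void init_once(void)
{
	if (!test_and_set_bit(INIT_DONE, &once_flags))
		do_expensive_init();	/* hypothetical helper */
}
#endif
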
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%3")
		M32R_LOCK" %0, @%3;	\n\t"
		"mv %1, %0;		\n\t"
		"and %0, %2;		\n\t"
		"not %2, %2;		\n\t"
		"and %1, %2;		\n\t"
		M32R_UNLOCK" %1, @%3;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
		: "r" (a)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
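
/*
 * Usage sketch (pending, EV_TIMER and handle_timer_event() are
 * hypothetical): consuming a pending-event bit. Because test-and-clear
 * is a single atomic operation, the event is handled at most once even
 * if the producer sets the bit again concurrently.
 */
#if 0	/* usage sketch only, never compiled */
static unsigned long pending;
#define EV_TIMER	1

static void poll_events(void)
{
	if (test_and_clear_bit(EV_TIMER, &pending))
		handle_timer_event();	/* hypothetical helper */
}
#endif
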
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv %1, %0;		\n\t"
		"and %0, %3;		\n\t"
		"xor %1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */