bitops.h
#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 * linux/include/asm-m32r/bitops.h
 *
 * Copyright 1992, Linus Torvalds.
 *
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <asm/assembler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/types.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. The test_and_* operations return 0 if the
 * bit was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
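
/*
 * Illustrative sketch (not part of the original header): the bit index is
 * linear across 32-bit words, so a caller with a hypothetical two-word
 * bitmap reaches the second word simply by passing a bit number >= 32.
 *
 *	static unsigned long bitmap[2];
 *
 *	set_bit(0, bitmap);	// word 0, bit 0
 *	set_bit(33, bitmap);	// word 1, bit 1 (bit 32 is the LSB of bitmap+1)
 */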
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"or	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"and	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (~mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
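
/*
 * Illustrative sketch (assumption, not from the original file): when
 * clear_bit() releases a flag used for locking, the caller supplies the
 * barrier itself. MY_LOCK_BIT and lock_word are hypothetical names.
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_LOCK_BIT, &lock_word);
 */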
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"xor	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %3;		\n\t"
		"or	%1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%3")
		M32R_LOCK" %0, @%3;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %2;		\n\t"
		"not	%2, %2;		\n\t"
		"and	%1, %2;		\n\t"
		M32R_UNLOCK" %1, @%3;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
		: "r" (a)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %3;		\n\t"
		"xor	%1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}
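
/*
 * Illustrative sketch (assumption, not from the original file):
 * test_and_set_bit() is typically used to claim a flag exactly once,
 * acting on the returned old value. BUSY_BIT and busy_map are
 * hypothetical names.
 *
 *	if (test_and_set_bit(BUSY_BIT, &busy_map))
 *		return -EBUSY;			// bit was already set
 *	...					// do the work
 *	smp_mb__before_clear_bit();
 *	clear_bit(BUSY_BIT, &busy_map);		// release the flag
 */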
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */