#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 * linux/include/asm-m32r/bitops.h
 *
 * Copyright 1992, Linus Torvalds.
 *
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/assembler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/types.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic.  All bit operations that return a value
 * return 0 if the bit was cleared before the operation and != 0 if it
 * was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
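
/*
 * Worked example of the indexing convention above: for nr = 37, the word
 * index is 37 >> 5 == 1 and the bit offset is 37 & 0x1F == 5, so bit 37
 * is bit 5 of the second 32-bit word, i.e. mask (1 << 5) == 0x20 applied
 * to *((__u32 *)addr + 1).
 */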

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1;     \n\t"
                "or %0, %2;             \n\t"
                M32R_UNLOCK" %0, @%1;   \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}
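
/*
 * Minimal usage sketch (illustrative only): atomically mark a slot busy in
 * a driver-private bitmap; safe against concurrent callers on other CPUs.
 * `slot_map` and `MAX_SLOTS` are hypothetical names, not defined by this
 * header.
 *
 *      static __u32 slot_map[MAX_SLOTS / 32];
 *
 *      static void mark_slot_busy(int nr)
 *      {
 *              set_bit(nr, slot_map);
 *      }
 */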

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1;     \n\t"
                "and %0, %2;            \n\t"
                M32R_UNLOCK" %0, @%1;   \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (~mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}

#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
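
/*
 * Sketch of the locking pattern described above: when a bit is used as a
 * lock, order the critical-section stores before the releasing clear_bit().
 * `RESOURCE_LOCKED` and `state` are hypothetical names.
 *
 *      smp_mb__before_clear_bit();
 *      clear_bit(RESOURCE_LOCKED, &state);
 */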

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1;     \n\t"
                "xor %0, %2;            \n\t"
                M32R_UNLOCK" %0, @%1;   \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}
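
/*
 * Usage sketch (illustrative only): atomically flip a status bit from any
 * context.  `LED_BIT` and `led_state` are hypothetical names.
 *
 *      change_bit(LED_BIT, &led_state);
 */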

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%2")
                M32R_LOCK" %0, @%2;     \n\t"
                "mv %1, %0;             \n\t"
                "and %0, %3;            \n\t"
                "or %1, %3;             \n\t"
                M32R_UNLOCK" %1, @%2;   \n\t"
                : "=&r" (oldbit), "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}
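
/*
 * Classic try-lock sketch built on test_and_set_bit(): spin until the old
 * value is observed to be 0, i.e. until this CPU is the one that set the
 * bit.  `RESOURCE_LOCKED` and `state` are hypothetical names.
 *
 *      while (test_and_set_bit(RESOURCE_LOCKED, &state))
 *              cpu_relax();
 *      ... critical section ...
 *      smp_mb__before_clear_bit();
 *      clear_bit(RESOURCE_LOCKED, &state);
 */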

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%3")
                M32R_LOCK" %0, @%3;     \n\t"
                "mv %1, %0;             \n\t"
                "and %0, %2;            \n\t"
                "not %2, %2;            \n\t"
                "and %1, %2;            \n\t"
                M32R_UNLOCK" %1, @%3;   \n\t"
                : "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
                : "r" (a)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}
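
/*
 * Usage sketch (illustrative only): consume a pending-event flag so that
 * exactly one caller acts on it.  `EVENT_PENDING`, `pending` and
 * handle_event() are hypothetical names.
 *
 *      if (test_and_clear_bit(EVENT_PENDING, &pending))
 *              handle_event();
 */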

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%2")
                M32R_LOCK" %0, @%2;     \n\t"
                "mv %1, %0;             \n\t"
                "and %0, %3;            \n\t"
                "xor %1, %3;            \n\t"
                M32R_UNLOCK" %1, @%2;   \n\t"
                : "=&r" (oldbit), "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}
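
/*
 * Usage sketch (illustrative only): flip a bit and learn its previous
 * state in one atomic step.  `PARITY_BIT` and `word` are hypothetical
 * names.
 *
 *      int was_set = test_and_change_bit(PARITY_BIT, &word);
 */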

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>

#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */