bitops.h

#ifndef _BLACKFIN_BITOPS_H
#define _BLACKFIN_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

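/*
 * The "atomic" operations below gain their atomicity by disabling
 * local interrupts around the read-modify-write sequence; that is
 * sufficient only on a uniprocessor configuration.  For a bit number
 * nr, nr >> 5 selects the 32-bit word and nr & 0x1f the bit within it.
 */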
static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
}

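/*
 * __set_bit() and the other double-underscore variants below are the
 * non-atomic versions: they skip the interrupt disable, so the caller
 * must ensure that nothing else can touch the same word concurrently.
 */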
static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

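/*
 * A compiler-only barrier() is enough for the two hooks above: the
 * atomic operations on this port already run with local interrupts
 * disabled, so no hardware memory barrier is required.
 */
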
static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long flags;	/* was int; local_irq_save() needs unsigned long */
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	local_irq_save(flags);
	*ADDR ^= mask;
	local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

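/*
 * The test_and_*() operations return the previous value of the bit
 * (non-zero if it was set) and update it inside the same
 * interrupts-disabled section, so the test and the update form one
 * atomic step.
 */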
static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore(flags);
	return retval;
}

static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);
	return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);
	return retval;
}

static __inline__ int __test_and_change_bit(int nr,
					    volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const void *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const void *addr)
{
	const int *a = (const int *)addr;	/* read-only, keep const */
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

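/*
 * When the bit number is a compile-time constant,
 * __constant_test_bit() lets the compiler fold the word index and the
 * mask into immediate operands; __builtin_constant_p() chooses between
 * the two forms.
 */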
#define test_bit(nr, addr)			\
	(__builtin_constant_p(nr) ?		\
	 __constant_test_bit((nr), (addr)) :	\
	 __test_bit((nr), (addr)))

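/*
 * Sketch of typical use (hypothetical caller, not part of this header):
 *
 *	static unsigned long pending[2];	bitmap of 64 flags
 *
 *	set_bit(5, pending);			atomically set flag 5
 *	if (test_and_clear_bit(5, pending))
 *		handle_event();			flag 5 was set, now clear
 *
 * "pending" and handle_event() are illustrative names only.
 */
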
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/ext2-non-atomic.h>

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _BLACKFIN_BITOPS_H */