/* bitops.h — Blackfin bit-manipulation primitives (IRQ-masking based). */
  1. #ifndef _BLACKFIN_BITOPS_H
  2. #define _BLACKFIN_BITOPS_H
  3. /*
  4. * Copyright 1992, Linus Torvalds.
  5. */
  6. #include <linux/compiler.h>
  7. #include <asm/byteorder.h> /* swab32 */
  8. #include <asm/system.h> /* save_flags */
  9. #ifdef __KERNEL__
  10. #include <asm-generic/bitops/ffs.h>
  11. #include <asm-generic/bitops/__ffs.h>
  12. #include <asm-generic/bitops/sched.h>
  13. #include <asm-generic/bitops/ffz.h>
  14. static __inline__ void set_bit(int nr, volatile unsigned long *addr)
  15. {
  16. int *a = (int *)addr;
  17. int mask;
  18. unsigned long flags;
  19. a += nr >> 5;
  20. mask = 1 << (nr & 0x1f);
  21. local_irq_save(flags);
  22. *a |= mask;
  23. local_irq_restore(flags);
  24. }
  25. static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
  26. {
  27. int *a = (int *)addr;
  28. int mask;
  29. a += nr >> 5;
  30. mask = 1 << (nr & 0x1f);
  31. *a |= mask;
  32. }
  33. /*
  34. * clear_bit() doesn't provide any barrier for the compiler.
  35. */
  36. #define smp_mb__before_clear_bit() barrier()
  37. #define smp_mb__after_clear_bit() barrier()
  38. static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
  39. {
  40. int *a = (int *)addr;
  41. int mask;
  42. unsigned long flags;
  43. a += nr >> 5;
  44. mask = 1 << (nr & 0x1f);
  45. local_irq_save(flags);
  46. *a &= ~mask;
  47. local_irq_restore(flags);
  48. }
  49. static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
  50. {
  51. int *a = (int *)addr;
  52. int mask;
  53. a += nr >> 5;
  54. mask = 1 << (nr & 0x1f);
  55. *a &= ~mask;
  56. }
  57. static __inline__ void change_bit(int nr, volatile unsigned long *addr)
  58. {
  59. int mask, flags;
  60. unsigned long *ADDR = (unsigned long *)addr;
  61. ADDR += nr >> 5;
  62. mask = 1 << (nr & 31);
  63. local_irq_save(flags);
  64. *ADDR ^= mask;
  65. local_irq_restore(flags);
  66. }
  67. static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
  68. {
  69. int mask;
  70. unsigned long *ADDR = (unsigned long *)addr;
  71. ADDR += nr >> 5;
  72. mask = 1 << (nr & 31);
  73. *ADDR ^= mask;
  74. }
  75. static __inline__ int test_and_set_bit(int nr, void *addr)
  76. {
  77. int mask, retval;
  78. volatile unsigned int *a = (volatile unsigned int *)addr;
  79. unsigned long flags;
  80. a += nr >> 5;
  81. mask = 1 << (nr & 0x1f);
  82. local_irq_save(flags);
  83. retval = (mask & *a) != 0;
  84. *a |= mask;
  85. local_irq_restore(flags);
  86. return retval;
  87. }
  88. static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
  89. {
  90. int mask, retval;
  91. volatile unsigned int *a = (volatile unsigned int *)addr;
  92. a += nr >> 5;
  93. mask = 1 << (nr & 0x1f);
  94. retval = (mask & *a) != 0;
  95. *a |= mask;
  96. return retval;
  97. }
  98. static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
  99. {
  100. int mask, retval;
  101. volatile unsigned int *a = (volatile unsigned int *)addr;
  102. unsigned long flags;
  103. a += nr >> 5;
  104. mask = 1 << (nr & 0x1f);
  105. local_irq_save(flags);
  106. retval = (mask & *a) != 0;
  107. *a &= ~mask;
  108. local_irq_restore(flags);
  109. return retval;
  110. }
  111. static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
  112. {
  113. int mask, retval;
  114. volatile unsigned int *a = (volatile unsigned int *)addr;
  115. a += nr >> 5;
  116. mask = 1 << (nr & 0x1f);
  117. retval = (mask & *a) != 0;
  118. *a &= ~mask;
  119. return retval;
  120. }
  121. static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
  122. {
  123. int mask, retval;
  124. volatile unsigned int *a = (volatile unsigned int *)addr;
  125. unsigned long flags;
  126. a += nr >> 5;
  127. mask = 1 << (nr & 0x1f);
  128. local_irq_save(flags);
  129. retval = (mask & *a) != 0;
  130. *a ^= mask;
  131. local_irq_restore(flags);
  132. return retval;
  133. }
  134. static __inline__ int __test_and_change_bit(int nr,
  135. volatile unsigned long *addr)
  136. {
  137. int mask, retval;
  138. volatile unsigned int *a = (volatile unsigned int *)addr;
  139. a += nr >> 5;
  140. mask = 1 << (nr & 0x1f);
  141. retval = (mask & *a) != 0;
  142. *a ^= mask;
  143. return retval;
  144. }
  145. /*
  146. * This routine doesn't need to be atomic.
  147. */
  148. static __inline__ int __constant_test_bit(int nr, const void *addr)
  149. {
  150. return ((1UL << (nr & 31)) &
  151. (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
  152. }
  153. static __inline__ int __test_bit(int nr, const void *addr)
  154. {
  155. int *a = (int *)addr;
  156. int mask;
  157. a += nr >> 5;
  158. mask = 1 << (nr & 0x1f);
  159. return ((mask & *a) != 0);
  160. }
/*
 * test_bit - return non-zero iff bit @nr of the bitmap at @addr is set.
 * Not atomic.  Dispatches to __constant_test_bit() when @nr is a
 * compile-time constant, otherwise to __test_bit().
 */
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
__constant_test_bit((nr),(addr)) : \
__test_bit((nr),(addr)))
  165. #include <asm-generic/bitops/find.h>
  166. #include <asm-generic/bitops/hweight.h>
  167. #include <asm-generic/bitops/ext2-atomic.h>
  168. #include <asm-generic/bitops/ext2-non-atomic.h>
  169. #include <asm-generic/bitops/minix.h>
  170. #endif /* __KERNEL__ */
  171. #include <asm-generic/bitops/fls.h>
  172. #include <asm-generic/bitops/fls64.h>
  173. #endif /* _BLACKFIN_BITOPS_H */