bitops.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000, 06  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#endif

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a |= mask;
		local_irq_restore(flags);
	}
}

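/*
 * Illustrative sketch, not part of the original header: how set_bit()
 * above splits @nr into a word index and a bit mask.  Assuming a 64-bit
 * kernel (SZLONG_LOG == 6, SZLONG_MASK == 63) and a hypothetical bitmap:
 *
 *	unsigned long map[2] = { 0, 0 };
 *
 *	set_bit(70, map);
 *	// word index: 70 >> 6 == 1         -> operates on map[1]
 *	// bit mask:   1UL << (70 & 63)     == 1UL << 6
 *	// result:     map[1] == 0x40
 *
 * This is why @nr may be almost arbitrarily large: it simply indexes
 * further into the array of longs.
 */
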
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a &= ~mask;
		local_irq_restore(flags);
	}
}

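/*
 * Illustrative sketch, not part of the original header: a typical
 * unlock-style use of clear_bit() with the barrier helpers defined above.
 * BUSY_BIT, flags and data are hypothetical names.
 *
 *	data = new_value;		// stores that must be visible first
 *	smp_mb__before_clear_bit();	// order the stores before the bit drops
 *	clear_bit(BUSY_BIT, &flags);	// other CPUs may now claim the resource
 */
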
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a ^= mask;
		local_irq_restore(flags);
	}
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();	/* the barrier this interface promises */

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	 and	%2, %0, %3			\n"
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		local_irq_restore(flags);

		smp_mb();

		return retval;
	}
}

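/*
 * Illustrative sketch, not part of the original header: test_and_set_bit()
 * used as a simple try-lock.  LOCK_BIT and lockword are hypothetical names.
 *
 *	if (!test_and_set_bit(LOCK_BIT, &lockword)) {
 *		// bit was 0 and is now 1: we own the resource
 *		...
 *		smp_mb__before_clear_bit();
 *		clear_bit(LOCK_BIT, &lockword);	// release
 *	} else {
 *		// bit was already set: somebody else holds it
 *	}
 */
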
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	 and	%2, %0, %3			\n"
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		local_irq_restore(flags);

		smp_mb();

		return retval;
	}
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	 and	%2, %0, %3			\n"
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		smp_mb();

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		local_irq_restore(flags);

		smp_mb();

		return retval;
	}
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push				\n"
		"	.set	mips32				\n"
		"	clz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push				\n"
	"	.set	mips64				\n"
	"	dclz	%0, %1				\n"
	"	.set	pop				\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}

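/*
 * Illustrative values, not part of the original header, assuming the
 * 32-bit clz path above (clz counts leading zero bits, clz of 0 is 32):
 *
 *	__ilog2(1)          == 31 - 31 ==  0
 *	__ilog2(6)          == 31 - 29 ==  2	(highest set bit of 0b110)
 *	__ilog2(0x80000000) == 31 - 0  == 31
 *	__ilog2(0)          == 31 - 32 == -1
 */
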
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}

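/*
 * Illustrative values, not part of the original header: word & -word
 * isolates the lowest set bit, so __ilog2() of that value is its index.
 *
 *	__ffs(0x00000001) == 0
 *	__ffs(0x00000008) == 3
 *	__ffs(0xa0000000) == 29
 *	__ffs(0) is undefined -- callers must check for zero first
 */
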
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}

#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

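/*
 * Illustrative comparison, not part of the original header, of the three
 * related helpers on the same hypothetical value:
 *
 *	int word = 0x50;	// bits 4 and 6 set
 *
 *	__ffs(word) == 4	// 0-based index of the lowest set bit
 *	ffs(word)   == 5	// 1-based (libc convention), ffs(0) == 0
 *	fls(word)   == 7	// 1-based index of the highest set bit, fls(0) == 0
 */
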
#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */
  437. #endif /* _ASM_BITOPS_H */