/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupts for kernel mode stuff, to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # set_bit \n"
		" or %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # set_bit \n"
		" or %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqz %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
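
/*
 * Usage sketch (illustrative only; the bitmap name and bit numbers below are
 * hypothetical and not part of this header):
 *
 *	static unsigned long device_flags[2];		// 2 * BITS_PER_LONG bits
 *
 *	set_bit(3, device_flags);			// atomically set bit 3
 *	set_bit(BITS_PER_LONG + 1, device_flags);	// lands in the second word
 *
 * Because @nr is split into a word index (nr >> SZLONG_LOG) and a bit offset
 * (nr & SZLONG_MASK), it may exceed BITS_PER_LONG as long as the underlying
 * array is large enough.
 */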

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # clear_bit \n"
		" and %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # clear_bit \n"
		" and %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqz %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}
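
/*
 * Sketch of the barrier pairing described above (illustrative only; the lock
 * bit name and lock word are hypothetical):
 *
 *	// release path of a hand-rolled bit lock
 *	smp_mb__before_clear_bit();		// order earlier stores before the release
 *	clear_bit(MY_LOCK_BIT, &lock_word);
 *
 * Without the explicit barrier, clear_bit() alone does not guarantee that
 * earlier writes are visible to another CPU that subsequently acquires the bit.
 */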

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # change_bit \n"
		" xor %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqzl %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # change_bit \n"
		" xor %0, %2 \n"
		" " __SC "%0, %1 \n"
		" beqz %0, 1b \n"
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}
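
/*
 * Usage sketch (illustrative only; the names are hypothetical):
 *
 *	change_bit(LED_BIT, &gpio_shadow);	// atomically toggle one bit
 *
 * The read-modify-write is a single LL/SC retry loop (or runs with interrupts
 * disabled on CPUs without LL/SC), so concurrent togglers cannot lose updates.
 */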

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_set_bit \n"
		" or %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set push \n"
		" .set noreorder \n"
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_set_bit \n"
		" or %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqz %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set pop \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
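
/*
 * Usage sketch (illustrative only; the flag name and state word are
 * hypothetical): claiming a single-owner flag, relying on the old value
 * that test_and_set_bit() returns.
 *
 *	if (test_and_set_bit(FLAG_IN_USE, &state)) {
 *		// bit was already set: someone else owns the resource
 *		return -EBUSY;
 *	}
 *	// bit was previously clear and is now set: we own the resource
 */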

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_clear_bit \n"
		" or %2, %0, %3 \n"
		" xor %2, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set push \n"
		" .set noreorder \n"
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_clear_bit \n"
		" or %2, %0, %3 \n"
		" xor %2, %3 \n"
		" " __SC "%2, %1 \n"
		" beqz %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set pop \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
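
/*
 * Usage sketch (illustrative only; the names are hypothetical): consuming a
 * pending-work flag exactly once, even if several CPUs race to consume it.
 *
 *	if (test_and_clear_bit(PENDING_BIT, &pending))
 *		process_work();		// only the caller that saw 1 runs this
 */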

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_change_bit \n"
		" xor %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqzl %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set mips0 \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		" .set push \n"
		" .set noreorder \n"
		" .set mips3 \n"
		"1: " __LL "%0, %1 # test_and_change_bit \n"
		" xor %2, %0, %3 \n"
		" " __SC "%2, %1 \n"
		" beqz %2, 1b \n"
		" and %2, %0, %3 \n"
#ifdef CONFIG_SMP
		" sync \n"
#endif
		" .set pop \n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
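
/*
 * Usage sketch (illustrative only; the names are hypothetical):
 *
 *	int was_set = test_and_change_bit(POLARITY_BIT, &config);
 *	// the bit is now inverted; was_set reports its value before the flip
 */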

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

#include <asm-generic/bitops/non-atomic.h>

/*
 * Return the bit position (0..SZLONG-1) of the most significant 1 bit in a
 * word.  Returns -1 if no 1 bit exists.
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		" .set push \n"
		" .set mips32 \n"
		" clz %0, %1 \n"
		" .set pop \n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	" .set push \n"
	" .set mips64 \n"
	" dclz %0, %1 \n"
	" .set pop \n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
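
/*
 * Worked example (illustrative): for x = 0x90 the most significant set bit
 * is bit 7, so on a 32-bit kernel clz yields 24 and __ilog2() returns
 * 31 - 24 = 7.  For x = 0, clz yields 32 and the result is -1.
 */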

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}
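
/*
 * Worked example (illustrative): word & -word isolates the lowest set bit,
 * so for word = 0b10110 (22) the expression yields 0b00010 and
 * __ffs(22) == __ilog2(2) == 1.
 */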

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}
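
/*
 * Worked example (illustrative): clz(0x80000000) is 0, so fls(0x80000000)
 * == 32; clz(1) is 31, so fls(1) == 1; clz(0) is defined as 32, so
 * fls(0) == 0, matching the convention documented above.
 */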

#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from ffz() (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
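
/*
 * Worked example (illustrative): ffs() is 1-based and tolerates 0, while
 * __ffs() is 0-based and must not be called with 0:
 *
 *	ffs(0)   == 0
 *	ffs(8)   == 4		// bit 3 is the lowest set bit
 *	__ffs(8) == 3
 */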

#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */