/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll "
#define __SC	"sc "
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld "
#define __SC	"scd "
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <linux/irqflags.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
/*
 * Only disable interrupts for kernel-mode code; for usermode code that
 * dares to use kernel include files, these become no-ops so it stays alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
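
/*
 * Example (a minimal sketch; the bitmap name and sizes below are
 * hypothetical, not part of this header): publishing flags in a bitmap.
 *
 *	static DECLARE_BITMAP(my_status, 128);
 *
 *	set_bit(3, my_status);		// atomically sets bit 3 of word 0
 *	set_bit(70, my_status);		// @nr beyond one word selects a later word
 */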
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}
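
/*
 * Example (a minimal sketch; my_status and MY_LOCK_BIT are hypothetical):
 * when the bit is used as a lock, pair clear_bit() with the barrier
 * described above so prior stores are ordered before the release.
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_LOCK_BIT, my_status);
 */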
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}
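
/*
 * Example (a minimal sketch; names hypothetical): flipping a shared
 * polarity/state flag with an atomic XOR of a single bit.
 *
 *	change_bit(MY_POLARITY_BIT, my_status);
 */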
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
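
/*
 * Example (a minimal sketch; my_status, MY_BUSY_BIT and start_hardware()
 * are hypothetical): the "claim once" pattern -- only the caller that
 * observes the old value as 0 proceeds.
 *
 *	if (!test_and_set_bit(MY_BUSY_BIT, my_status))
 *		start_hardware();	// we won the race
 *	else
 *		return -EBUSY;		// someone else already claimed it
 */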
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
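
/*
 * Example (a minimal sketch; names hypothetical): consuming a
 * "work pending" flag exactly once.
 *
 *	if (test_and_clear_bit(MY_PENDING_BIT, my_status))
 *		process_pending_work();	// flag was set; it is now cleared
 */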
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
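
/*
 * Example (a minimal sketch; names hypothetical): toggle a bit and learn
 * which phase we were in before the flip.
 *
 *	if (test_and_change_bit(MY_PHASE_BIT, my_status))
 *		handle_phase_a();	// bit was 1 before the toggle, now 0
 *	else
 *		handle_phase_b();	// bit was 0 before the toggle, now 1
 */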
#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

#include <asm-generic/bitops/non-atomic.h>
/*
 * Return the bit position (0..SZLONG-1) of the most significant 1 bit in
 * a word.  Returns -1 if no 1 bit exists.
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push				\n"
		"	.set	mips32				\n"
		"	clz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push				\n"
	"	.set	mips64				\n"
	"	dclz	%0, %1				\n"
	"	.set	pop				\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
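
/*
 * Example: for x = 0x90 (binary 1001 0000) the 32-bit clz result is 24,
 * so __ilog2(0x90) == 31 - 24 == 7.  For x == 0, clz/dclz return the full
 * word size, which gives the documented -1.
 */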
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}
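
/*
 * Example: word & -word isolates the lowest set bit, so for word = 0x90
 * this yields 0x10 and __ffs(0x90) == __ilog2(0x10) == 4.  __ffs(0) is
 * undefined, as noted above.
 */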
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}
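
/*
 * Example: clz(0x90) == 24, so fls(0x90) == 32 - 24 == 8; clz(0) == 32 on
 * MIPS32, so fls(0) == 0 as documented.  For non-zero word,
 * fls(word) == __ilog2(word) + 1.
 */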
#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from ffz() (man ffs):
 * bits are counted starting at 1, and ffs(0) returns 0.
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */