#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 *  linux/include/asm-m32r/bitops.h
 *
 *  Copyright 1992, Linus Torvalds.
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/assembler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/types.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

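/*
 * Worked example of the numbering: nr = 35 refers to bit 3 of the second
 * 32-bit word, i.e. ((volatile __u32 *)addr)[1] & (1 << 3), since the
 * helpers below index the word with (nr >> 5) and build the mask from
 * (nr & 0x1F).
 */
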
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"or	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

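/*
 * A minimal usage sketch (illustrative only, not part of the original
 * header): the bitmap is a caller-owned array of unsigned longs, and
 * set_bit() publishes one flag in it atomically with respect to other
 * CPUs and interrupt handlers.  The names below are hypothetical.
 */
static __inline__ void example_mark_ready(volatile unsigned long *ready_map,
					  int id)
{
	/* Word index and bit mask are derived from 'id' inside set_bit(). */
	set_bit(id, ready_map);
}
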
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	*a |= mask;
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"and	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (~mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask;
	volatile unsigned long *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	*a &= ~mask;
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

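/*
 * Illustrative sketch (not part of the original header): when a bit is used
 * as a simple "busy" flag, the release side must make its earlier stores
 * visible before the flag is cleared, hence smp_mb__before_clear_bit()
 * ahead of clear_bit().  The bitmap and slot number are hypothetical.
 */
static __inline__ void example_release_busy(volatile unsigned long *busy_map,
					    int slot)
{
	smp_mb__before_clear_bit();	/* order prior writes before the clear */
	clear_bit(slot, busy_map);
}
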
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	*a ^= mask;
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1;	\n\t"
		"xor	%0, %2;		\n\t"
		M32R_UNLOCK" %0, @%1;	\n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %3;		\n\t"
		"or	%1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

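/*
 * Illustrative sketch (not part of the original header): test_and_set_bit()
 * combines the search for a free slot and the claim of that slot into one
 * atomic step, so two CPUs cannot both claim the same slot.  The helper
 * name and parameters are hypothetical.
 */
static __inline__ int example_claim_slot(volatile unsigned long *slot_map,
					 int nslots)
{
	int slot;

	for (slot = 0; slot < nslots; slot++)
		if (!test_and_set_bit(slot, slot_map))
			return slot;	/* bit was 0 before; this caller owns it */

	return -1;			/* every slot was already taken */
}
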
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	oldbit = (*a & mask);
	*a |= mask;

	return (oldbit != 0);
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%3")
		M32R_LOCK" %0, @%3;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %2;		\n\t"
		"not	%2, %2;		\n\t"
		"and	%1, %2;		\n\t"
		M32R_UNLOCK" %1, @%3;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
		: "r" (a)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

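/*
 * Illustrative sketch (not part of the original header): test_and_clear_bit()
 * atomically consumes a pending-work flag, so the same event cannot be
 * taken twice even if the setter runs concurrently.  The names below are
 * hypothetical.
 */
static __inline__ int example_take_pending(volatile unsigned long *pending_map,
					   int nr)
{
	/* Non-zero only for the caller that actually cleared the bit. */
	return test_and_clear_bit(nr, pending_map);
}
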
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	oldbit = (*a & mask);
	*a &= ~mask;

	return (oldbit != 0);
}

/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	oldbit = (*a & mask);
	*a ^= mask;

	return (oldbit != 0);
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2;	\n\t"
		"mv	%1, %0;		\n\t"
		"and	%0, %3;		\n\t"
		"xor	%1, %3;		\n\t"
		M32R_UNLOCK" %1, @%2;	\n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void * addr)
{
	__u32 mask;
	const volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	return ((*a & mask) != 0);
}

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	int k;

	word = ~word;
	k = 0;
	if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
	if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
	if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
	if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
	if (!(word & 0x00000001)) { k += 1; }

	return k;
}

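/*
 * Worked example (illustrative): ffz(0x0000000b) == 2, since bits 0, 1 and 3
 * are set and bit 2 is the lowest clear bit.  Each step of the cascade above
 * halves the search window, so the answer is found in five constant-time
 * tests regardless of the input.
 */
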
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(const unsigned long *addr,
					 int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

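/*
 * Illustrative sketch (not part of the original header): scanning a bitmap
 * for a free entry with find_next_zero_bit(), starting just past the last
 * position that was handed out.  'map_bits' is the bitmap size in bits; the
 * helper name and parameters are hypothetical.
 */
static __inline__ int example_next_free(const unsigned long *map,
					int map_bits, int last_used)
{
	int bit = find_next_zero_bit(map, map_bits, last_used + 1);

	return (bit < map_bits) ? bit : -1;	/* -1 if no zero bit remains */
}
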
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	int k = 0;

	if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
	if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
	if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
	if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
	if (!(word & 0x00000001)) { k += 1; }

	return k;
}

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
					  unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

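/*
 * Illustrative sketch (not part of the original header): the usual
 * "for each set bit" loop built from find_next_bit(), here just counting
 * the set bits.  The helper name and parameters are hypothetical.
 */
static inline unsigned long example_count_set(const unsigned long *map,
					      unsigned long nbits)
{
	unsigned long bit, count = 0;

	for (bit = find_next_bit(map, nbits, 0);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		count++;

	return count;
}
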
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)

#endif /* __KERNEL__ */

#ifdef __KERNEL__

/*
 * ext2_XXXX function
 * orig: include/asm-sh/bitops.h
 */

#ifdef __LITTLE_ENDIAN__
#define ext2_set_bit			test_and_set_bit
#define ext2_clear_bit			__test_and_clear_bit
#define ext2_test_bit			test_bit
#define ext2_find_first_zero_bit	find_first_zero_bit
#define ext2_find_next_zero_bit		find_next_zero_bit
#else
static inline int ext2_set_bit(int nr, volatile void * addr)
{
	__u8 mask, oldbit;
	volatile __u8 *a = addr;

	a += (nr >> 3);
	mask = (1 << (nr & 0x07));
	oldbit = (*a & mask);
	*a |= mask;

	return (oldbit != 0);
}

static inline int ext2_clear_bit(int nr, volatile void * addr)
{
	__u8 mask, oldbit;
	volatile __u8 *a = addr;

	a += (nr >> 3);
	mask = (1 << (nr & 0x07));
	oldbit = (*a & mask);
	*a &= ~mask;

	return (oldbit != 0);
}

static inline int ext2_test_bit(int nr, const volatile void * addr)
{
	__u32 mask;
	const volatile __u8 *a = addr;

	a += (nr >> 3);
	mask = (1 << (nr & 0x07));

	return ((mask & *a) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static inline unsigned long ext2_find_next_zero_bit(void *addr,
					unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}

#endif

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr)		__test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			__set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	__test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */