#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 * linux/include/asm-m32r/bitops.h
 *
 * Copyright 1992, Linus Torvalds.
 *
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/assembler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/types.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1; \n\t"
                "or %0, %2; \n\t"
                M32R_UNLOCK" %0, @%1; \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a |= mask;
}
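
/*
 * Usage sketch (illustration only; dev_flags and DEV_RUNNING are
 * hypothetical caller-side names, not part of this header):
 *
 *      static unsigned long dev_flags;
 *      #define DEV_RUNNING 0
 *
 *      set_bit(DEV_RUNNING, &dev_flags);    -- safe against concurrent updaters
 *      __set_bit(DEV_RUNNING, &dev_flags);  -- only when callers already serialize
 */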

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1; \n\t"
                "and %0, %2; \n\t"
                M32R_UNLOCK" %0, @%1; \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (~mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
{
        unsigned long mask;
        volatile unsigned long *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a &= ~mask;
}

#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
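
/*
 * Barrier usage sketch (illustration only; LOCK_BIT and ctx_flags are
 * hypothetical caller-side names): when a cleared bit publishes
 * "unlocked" to other CPUs, order the prior stores before the clear:
 *
 *      smp_mb__before_clear_bit();
 *      clear_bit(LOCK_BIT, &ctx_flags);
 */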

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        *a ^= mask;
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
        __u32 mask;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "r6", "%1")
                M32R_LOCK" %0, @%1; \n\t"
                "xor %0, %2; \n\t"
                M32R_UNLOCK" %0, @%1; \n\t"
                : "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
#ifdef CONFIG_CHIP_M32700_TS1
                , "r6"
#endif  /* CONFIG_CHIP_M32700_TS1 */
        );
        local_irq_restore(flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%2")
                M32R_LOCK" %0, @%2; \n\t"
                "mv %1, %0; \n\t"
                "and %0, %3; \n\t"
                "or %1, %3; \n\t"
                M32R_UNLOCK" %1, @%2; \n\t"
                : "=&r" (oldbit), "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}
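
/*
 * Usage sketch (illustration only; BUSY_BIT and hw_state are hypothetical
 * caller-side names): test_and_set_bit() claims a resource in one atomic
 * step, and clear_bit() releases it later:
 *
 *      if (test_and_set_bit(BUSY_BIT, &hw_state))
 *              return -EBUSY;          (someone else already owns it)
 *      ...
 *      clear_bit(BUSY_BIT, &hw_state);
 */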

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a |= mask;

        return (oldbit != 0);
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%3")
                M32R_LOCK" %0, @%3; \n\t"
                "mv %1, %0; \n\t"
                "and %0, %2; \n\t"
                "not %2, %2; \n\t"
                "and %1, %2; \n\t"
                M32R_UNLOCK" %1, @%3; \n\t"
                : "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
                : "r" (a)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a &= ~mask;

        return (oldbit != 0);
}

/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));
        oldbit = (*a & mask);
        *a ^= mask;

        return (oldbit != 0);
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        __u32 mask, oldbit;
        volatile __u32 *a = addr;
        unsigned long flags;
        unsigned long tmp;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        local_irq_save(flags);
        __asm__ __volatile__ (
                DCACHE_CLEAR("%0", "%1", "%2")
                M32R_LOCK" %0, @%2; \n\t"
                "mv %1, %0; \n\t"
                "and %0, %3; \n\t"
                "xor %1, %3; \n\t"
                M32R_UNLOCK" %1, @%2; \n\t"
                : "=&r" (oldbit), "=&r" (tmp)
                : "r" (a), "r" (mask)
                : "memory"
        );
        local_irq_restore(flags);

        return (oldbit != 0);
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void * addr)
{
        __u32 mask;
        const volatile __u32 *a = addr;

        a += (nr >> 5);
        mask = (1 << (nr & 0x1F));

        return ((*a & mask) != 0);
}

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        int k;

        word = ~word;
        k = 0;
        if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
        if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
        if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
        if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
        if (!(word & 0x00000001)) { k += 1; }

        return k;
}
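
/*
 * Worked example (illustration only): ffz() reports the position of the
 * lowest clear bit, so ffz(0x0000ffff) == 16 and ffz(0x00000003) == 2.
 * Callers scanning a bitmap word typically guard the call:
 *
 *      if (word != ~0UL)
 *              free_slot = ffz(word);
 */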

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static __inline__ int find_next_zero_bit(const unsigned long *addr,
                                         int size, int offset)
{
        const unsigned long *p = addr + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}
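
/*
 * Usage sketch (illustration only; slot_map is a hypothetical caller-side
 * bitmap of 64 slots): find a free slot, then claim it.
 *
 *      unsigned long slot_map[2];
 *      int slot = find_first_zero_bit(slot_map, 64);
 *      if (slot < 64)
 *              set_bit(slot, slot_map);
 */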

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
        int k = 0;

        if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
        if (!(word & 0x000000ff)) { k += 8;  word >>= 8;  }
        if (!(word & 0x0000000f)) { k += 4;  word >>= 4;  }
        if (!(word & 0x00000003)) { k += 2;  word >>= 2;  }
        if (!(word & 0x00000001)) { k += 1; }

        return k;
}

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)
#define fls64(x) generic_fls64(x)

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
}
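
/*
 * Worked example (illustration only): the scheduler's 140-bit priority
 * bitmap occupies five 32-bit words.  If only priority 97 is marked,
 * then b[0..2] are zero, b[3] == (1 << 1), and sched_find_first_bit(b)
 * returns __ffs(b[3]) + 96 == 1 + 96 == 97.
 */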

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
                                          unsigned long size, unsigned long offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *p++;
                tmp &= ~0UL << offset;
                if (size < 32)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = *p++) != 0)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (32 - size);
        if (tmp == 0UL)                 /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)
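
/*
 * Usage sketch (illustration only; irq_pending and NR_SOURCES are
 * hypothetical caller-side names): walk every set bit in a bitmap.
 *
 *      for (i = find_first_bit(irq_pending, NR_SOURCES);
 *           i < NR_SOURCES;
 *           i = find_next_bit(irq_pending, NR_SOURCES, i + 1))
 *              handle_source(i);
 */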

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
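
/*
 * Worked example (illustration only): hweight8(0xb3) == 5, since
 * 0xb3 == 10110011 in binary has five bits set; hweight16() and
 * hweight32() behave the same way on wider values.
 */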

#endif /* __KERNEL__ */

#ifdef __KERNEL__

/*
 * ext2_XXXX functions
 * orig: include/asm-sh/bitops.h
 */

#ifdef __LITTLE_ENDIAN__
#define ext2_set_bit                    test_and_set_bit
#define ext2_clear_bit                  __test_and_clear_bit
#define ext2_test_bit                   test_bit
#define ext2_find_first_zero_bit        find_first_zero_bit
#define ext2_find_next_zero_bit         find_next_zero_bit
#else
static inline int ext2_set_bit(int nr, volatile void * addr)
{
        __u8 mask, oldbit;
        volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));
        oldbit = (*a & mask);
        *a |= mask;

        return (oldbit != 0);
}

static inline int ext2_clear_bit(int nr, volatile void * addr)
{
        __u8 mask, oldbit;
        volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));
        oldbit = (*a & mask);
        *a &= ~mask;

        return (oldbit != 0);
}

static inline int ext2_test_bit(int nr, const volatile void * addr)
{
        __u32 mask;
        const volatile __u8 *a = addr;

        a += (nr >> 3);
        mask = (1 << (nr & 0x07));

        return ((mask & *a) != 0);
}
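
/*
 * Worked example (illustration only): these byte-based helpers give ext2
 * bitmaps a fixed little-endian layout, where bit nr lives in byte
 * (nr >> 3), bit (nr & 7).  For nr == 10 that is byte 1, bit 2, so
 * ext2_set_bit(10, map) ORs 0x04 into byte 1 of the bitmap, matching
 * what the word-based functions produce on a little-endian CPU.
 */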

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

static inline unsigned long ext2_find_next_zero_bit(void *addr,
        unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* We hold the little endian value in tmp, but then the
                 * shift is illegal. So we could keep a big endian value
                 * in tmp, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we change the
                 * shift:
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}

#endif

#define ext2_set_bit_atomic(lock, nr, addr)             \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_set_bit((nr), (addr));       \
                spin_unlock(lock);                      \
                ret;                                    \
        })

#define ext2_clear_bit_atomic(lock, nr, addr)           \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_clear_bit((nr), (addr));     \
                spin_unlock(lock);                      \
                ret;                                    \
        })
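
/*
 * Usage sketch (illustration only; the lock expression and bitmap_bh are
 * hypothetical caller-side names, in the spirit of the ext2 block
 * allocator): the spinlock makes the read-modify-write of the on-disk
 * bitmap atomic with respect to other allocators in the same group.
 *
 *      if (ext2_set_bit_atomic(group_lock, bit, bitmap_bh->b_data))
 *              goto already_allocated;
 */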

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr)         __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)                  __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)       __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)                 test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)    find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */