/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#ifndef _ASM_TILE_BITOPS_32_H
#define _ASM_TILE_BITOPS_32_H

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/system.h>

/* Tile-specific routines to support <asm/bitops.h>. */
unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask);
unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
  23. /**
  24. * set_bit - Atomically set a bit in memory
  25. * @nr: the bit to set
  26. * @addr: the address to start counting from
  27. *
  28. * This function is atomic and may not be reordered.
  29. * See __set_bit() if you do not require the atomic guarantees.
  30. * Note that @nr may be almost arbitrarily large; this function is not
  31. * restricted to acting on a single-word quantity.
  32. */
  33. static inline void set_bit(unsigned nr, volatile unsigned long *addr)
  34. {
  35. _atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr));
  36. }
  37. /**
  38. * clear_bit - Clears a bit in memory
  39. * @nr: Bit to clear
  40. * @addr: Address to start counting from
  41. *
  42. * clear_bit() is atomic and may not be reordered.
  43. * See __clear_bit() if you do not require the atomic guarantees.
  44. * Note that @nr may be almost arbitrarily large; this function is not
  45. * restricted to acting on a single-word quantity.
  46. *
  47. * clear_bit() may not contain a memory barrier, so if it is used for
  48. * locking purposes, you should call smp_mb__before_clear_bit() and/or
  49. * smp_mb__after_clear_bit() to ensure changes are visible on other cpus.
  50. */
  51. static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
  52. {
  53. _atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
  54. }
  55. /**
  56. * change_bit - Toggle a bit in memory
  57. * @nr: Bit to change
  58. * @addr: Address to start counting from
  59. *
  60. * change_bit() is atomic and may not be reordered.
  61. * See __change_bit() if you do not require the atomic guarantees.
  62. * Note that @nr may be almost arbitrarily large; this function is not
  63. * restricted to acting on a single-word quantity.
  64. */
  65. static inline void change_bit(unsigned nr, volatile unsigned long *addr)
  66. {
  67. _atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
  68. }
  69. /**
  70. * test_and_set_bit - Set a bit and return its old value
  71. * @nr: Bit to set
  72. * @addr: Address to count from
  73. *
  74. * This operation is atomic and cannot be reordered.
  75. * It also implies a memory barrier.
  76. */
  77. static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
  78. {
  79. unsigned long mask = BIT_MASK(nr);
  80. addr += BIT_WORD(nr);
  81. smp_mb(); /* barrier for proper semantics */
  82. return (_atomic_or(addr, mask) & mask) != 0;
  83. }
  84. /**
  85. * test_and_clear_bit - Clear a bit and return its old value
  86. * @nr: Bit to clear
  87. * @addr: Address to count from
  88. *
  89. * This operation is atomic and cannot be reordered.
  90. * It also implies a memory barrier.
  91. */
  92. static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
  93. {
  94. unsigned long mask = BIT_MASK(nr);
  95. addr += BIT_WORD(nr);
  96. smp_mb(); /* barrier for proper semantics */
  97. return (_atomic_andn(addr, mask) & mask) != 0;
  98. }
  99. /**
  100. * test_and_change_bit - Change a bit and return its old value
  101. * @nr: Bit to change
  102. * @addr: Address to count from
  103. *
  104. * This operation is atomic and cannot be reordered.
  105. * It also implies a memory barrier.
  106. */
  107. static inline int test_and_change_bit(unsigned nr,
  108. volatile unsigned long *addr)
  109. {
  110. unsigned long mask = BIT_MASK(nr);
  111. addr += BIT_WORD(nr);
  112. smp_mb(); /* barrier for proper semantics */
  113. return (_atomic_xor(addr, mask) & mask) != 0;
  114. }
  115. /* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
  116. #define smp_mb__before_clear_bit() smp_mb()
  117. #define smp_mb__after_clear_bit() do {} while (0)
  118. #include <asm-generic/bitops/ext2-atomic.h>
  119. #endif /* _ASM_TILE_BITOPS_32_H */