bitops_64.h

/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BITOPS_64_H
#define _ASM_TILE_BITOPS_64_H

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/system.h>

/* See <asm/bitops.h> for API comments. */
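
/*
 * Illustrative usage sketch (the bitmap and helper below are
 * hypothetical, not part of this file).  DECLARE_BITMAP() comes from
 * <linux/types.h>:
 *
 *	static DECLARE_BITMAP(pending, 128);
 *
 *	set_bit(5, pending);
 *	if (test_and_clear_bit(5, pending))
 *		handle_pending(5);
 *
 * test_and_clear_bit() atomically clears the bit and returns its old
 * value, so exactly one racing caller sees it set.
 */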

static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
}

static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
}
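
/*
 * The fetch-style instructions above do not by themselves order
 * surrounding memory accesses (hence the explicit fences in the
 * test_and_xxx_bit() routines below), so the clear_bit() ordering
 * hooks expand to a full smp_mb() on each side.
 */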
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
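
/*
 * Unlike set_bit() and clear_bit(), change_bit() has no single
 * fetch-style instruction to lean on (there is no fetch-XOR), so it
 * retries an atomic64_cmpxchg() until no other cpu has raced with
 * the update.
 */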
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	long guess, oldval;
	addr += nr / BITS_PER_LONG;
	oldval = *addr;
	do {
		guess = oldval;
		oldval = atomic64_cmpxchg((atomic64_t *)addr,
					  guess, guess ^ mask);
	} while (guess != oldval);
}

/*
 * The test_and_xxx_bit() routines require a memory fence before we
 * start the operation, and after the operation completes.  We use
 * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
 * barrier(), to block until the atomic op is complete.
 */

static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
	int val;
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	smp_mb();  /* barrier for proper semantics */
	val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
	       & mask) != 0;
	barrier();
	return val;
}

static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
	int val;
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	smp_mb();  /* barrier for proper semantics */
	val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
	       & mask) != 0;
	barrier();
	return val;
}
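
/*
 * No explicit smp_mb() is needed here: a value-returning atomic
 * operation such as atomic64_cmpxchg() is itself required to act as
 * a full memory barrier.
 */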
static inline int test_and_change_bit(unsigned nr,
				      volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	long guess, oldval;
	addr += nr / BITS_PER_LONG;
	oldval = *addr;
	do {
		guess = oldval;
		oldval = atomic64_cmpxchg((atomic64_t *)addr,
					  guess, guess ^ mask);
	} while (guess != oldval);
	return (oldval & mask) != 0;
}

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _ASM_TILE_BITOPS_64_H */