atomic.h

/*
 *  linux/include/asm-arm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/config.h>

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

#define atomic_read(v)	((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
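
/*
 * Pseudo-C illustration (hypothetical, not compiled): each ldrex/strex
 * routine below is a retry loop of this shape, where the exclusive
 * store succeeds only if no other CPU wrote the word since the
 * exclusive load:
 *
 *	do {
 *		old = v->counter;		ldrex: load exclusive
 *		new = <op>(old, i);		add/sub/bic
 *	} while (store_exclusive(new) != 0);	strex: 0 means success
 */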
static inline void atomic_set(atomic_t *v, int i)
{
        unsigned long tmp;

        __asm__ __volatile__("@ atomic_set\n"
"1:     ldrex   %0, [%1]\n"
"       strex   %0, %2, [%1]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_add_return\n"
"1:     ldrex   %0, [%2]\n"
"       add     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_sub_return\n"
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        return result;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__("@ atomic_clear_mask\n"
"1:     ldrex   %0, [%2]\n"
"       bic     %0, %0, %3\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (addr), "Ir" (mask)
        : "cc");
}

#else /* ARM_ARCH_6 */

#include <asm/system.h>

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
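
/*
 * Without ldrex/strex, the fallbacks below make a plain
 * read-modify-write atomic by disabling local interrupts around it.
 * That shuts out interrupt handlers on the same CPU but offers no
 * protection against other processors, hence the #error above for
 * SMP builds.
 */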

#define atomic_set(v,i)	(((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        local_irq_save(flags);
        val = v->counter;
        v->counter = val += i;
        local_irq_restore(flags);

        return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        local_irq_save(flags);
        val = v->counter;
        v->counter = val -= i;
        local_irq_restore(flags);

        return val;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long flags;

        local_irq_save(flags);
        *addr &= ~mask;
        local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_add(i, v)	(void) atomic_add_return(i, v)
#define atomic_inc(v)		(void) atomic_add_return(1, v)
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
#define atomic_dec(v)		(void) atomic_sub_return(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))

#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */
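
/*
 * Usage sketch: a minimal, hypothetical reference count built on the
 * API above.  "struct my_object" and its helpers are invented names
 * for illustration only; kernel context is assumed.
 */
#include <asm/atomic.h>

struct my_object {
        atomic_t refcount;
        /* ... payload ... */
};

static void my_object_init(struct my_object *obj)
{
        atomic_set(&obj->refcount, 1);  /* creator holds one reference */
}

static void my_object_get(struct my_object *obj)
{
        atomic_inc(&obj->refcount);     /* take an extra reference */
}

static int my_object_put(struct my_object *obj)
{
        /*
         * atomic_dec_and_test() is true only for the caller that drops
         * the count to zero, so exactly one path sees the object die.
         */
        if (atomic_dec_and_test(&obj->refcount)) {
                /* last reference gone: safe to free obj here */
                return 1;
        }
        return 0;
}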