@@ -114,7 +114,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
-	unsigned long oldval, res;
+	int oldval;
+	unsigned long res;
 
 	smp_mb();
 
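
Note on the hunk above: the LDREX result is the old counter value, which is an
int, while res only carries the STREX status flag, so splitting the declaration
gives each variable its proper type instead of funnelling the return value
through an unsigned long. For context, a minimal sketch of how atomic_cmpxchg()
is typically consumed (illustration only; inc_if_positive() is a hypothetical
helper, not part of this patch):

	/* Hypothetical caller: bump v only while it stays positive. */
	static int inc_if_positive(atomic_t *v)
	{
		int old;

		do {
			old = atomic_read(v);
			if (old <= 0)
				return 0;	/* counter not positive */
		} while (atomic_cmpxchg(v, old, old + 1) != old);

		return 1;			/* counter incremented */
	}
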
@@ -134,21 +135,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long tmp, tmp2;
-
-	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%3]\n"
-"	bic	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-	: "r" (addr), "Ir" (mask)
-	: "cc");
-}
-
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
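
Both copies of atomic_clear_mask() go away here (this hunk and the next),
presumably because the function no longer has in-tree users. If equivalent
behaviour is ever needed on a plain word, it can be open-coded with a cmpxchg()
retry loop; a minimal sketch (clear_mask_slow() is a hypothetical name, and a
READ_ONCE()-style annotation on the load would be stricter):

	/* Hypothetical replacement: atomically clear mask bits in *addr. */
	static void clear_mask_slow(unsigned long mask, unsigned long *addr)
	{
		unsigned long old;

		do {
			old = *addr;
		} while (cmpxchg(addr, old, old & ~mask) != old);
	}
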
@@ -197,15 +183,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	*addr &= ~mask;
-	raw_local_irq_restore(flags);
-}
-
 #endif /* __LINUX_ARM_ARCH__ */
 
 #define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
@@ -238,15 +215,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
 #ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrd	%0, %H0, [%1]"
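
From here on the atomic64 block switches from u64 to long long throughout.
Under the ARM EABI both are 64-bit and naturally 8-byte aligned (so the
explicit __aligned(8) was presumably redundant), but long long matches the type
the generic atomic64 API uses on other architectures and keeps the arithmetic
signed. With LPAE, LDRD/STRD accesses to an aligned doubleword are
architecturally single-copy atomic, which is why read and set need no exclusive
loop here. Basic usage, illustration only (stats_bytes is a hypothetical
counter):

	static atomic64_t stats_bytes = ATOMIC64_INIT(0);

	static void stats_reset(void)
	{
		atomic64_set(&stats_bytes, 0);		/* atomic 64-bit store */
	}

	static long long stats_snapshot(void)
	{
		return atomic64_read(&stats_bytes);	/* atomic 64-bit load */
	}
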
@@ -257,7 +234,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
 	__asm__ __volatile__("@ atomic64_set\n"
 "	strd	%2, %H2, [%1]"
@@ -266,9 +243,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	);
 }
 #else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
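
Without LPAE there is no architectural guarantee that a plain doubleword access
is single-copy atomic, so the #else variants fall back on exclusive accesses:
LDREXD for the read and, in the next hunk, an LDREXD/STREXD retry loop for the
store, so that no observer can ever see a torn 64-bit value.
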
@@ -279,9 +256,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
-	u64 tmp;
+	long long tmp;
 
 	__asm__ __volatile__("@ atomic64_set\n"
 "1:	ldrexd	%0, %H0, [%2]\n"
@@ -294,9 +271,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 }
 #endif
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_add\n"
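
atomic64_add() and atomic64_sub() carry no barriers and return nothing; the
*_return variants below bracket the exclusive loop with smp_mb() so they are
fully ordered. A sketch of the plain form, reusing the hypothetical stats_bytes
counter from above:

	/* Hypothetical: account I/O in a counter that cannot wrap at 4 GiB. */
	static void account_bytes(long long nbytes)
	{
		atomic64_add(nbytes, &stats_bytes);	/* relaxed: no barrier */
	}
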
@@ -311,9 +288,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	: "cc");
 }
 
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -334,9 +311,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_sub\n"
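
Because the *_return forms are fully ordered, they suit release-style
bookkeeping such as reference counts; a sketch (struct thing, put_thing() and
free_thing() are all hypothetical):

	struct thing {
		atomic64_t refs;
	};

	static void free_thing(struct thing *t);	/* hypothetical */

	static void put_thing(struct thing *t)
	{
		if (atomic64_sub_return(1, &t->refs) == 0)
			free_thing(t);		/* last reference dropped */
	}
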
@@ -351,9 +328,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	: "cc");
 }
 
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -374,9 +351,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	return result;
 }
 
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+					  long long new)
 {
-	u64 oldval;
+	long long oldval;
 	unsigned long res;
 
 	smp_mb();
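
atomic64_cmpxchg() follows the same retry pattern as its 32-bit counterpart; a
typical lock-free consumer, for illustration (update_max() is hypothetical):

	/* Hypothetical: record a running maximum without a lock. */
	static void update_max(atomic64_t *max, long long val)
	{
		long long old;

		do {
			old = atomic64_read(max);
			if (old >= val)
				return;		/* already at least val */
		} while (atomic64_cmpxchg(max, old, val) != old);
	}
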
@@ -398,9 +376,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 	return oldval;
 }
 
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -419,9 +397,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 	return result;
 }
 
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
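
The signed type arguably matters most for atomic64_dec_if_positive(): it stores
the decrement only when the result stays non-negative and returns that result,
so a negative return means the counter was already zero, a condition a u64
return value could not even express to the caller. Illustration only
(take_token() is hypothetical):

	/* Hypothetical: consume one token if any remain. */
	static int take_token(atomic64_t *tokens)
	{
		return atomic64_dec_if_positive(tokens) >= 0;
	}
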
@@ -445,9 +423,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	return result;
 }
 
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
-	u64 val;
+	long long val;
 	unsigned long tmp;
 	int ret = 1;
 
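
atomic64_add_unless() keeps its int return (1 if the add happened); only the
value parameters become long long. The classic caller pattern, for illustration
(get_thing() reuses the hypothetical struct thing above):

	/* Hypothetical: take a reference only if the object is still live. */
	static int get_thing(struct thing *t)
	{
		return atomic64_add_unless(&t->refs, 1, 0);	/* no-op if refs == 0 */
	}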