/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/config.h>
#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.  prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
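
/* Illustrative sketch (not part of the original header): assuming
 * L1_CACHE_BYTES == 64 for the sake of the arithmetic, two atomic_ts in
 * different cachelines usually hash to different locks, so unrelated
 * atomic operations don't all serialize on one global spinlock.
 */
#if 0
	atomic_t a;	/* suppose &a == 0x...000: (0x...000/64) & 3 == 0 */
	atomic_t b;	/* suppose &b == 0x...040: (0x...040/64) & 3 == 1 */

	raw_spinlock_t *la = ATOMIC_HASH(&a);	/* &__atomic_hash[0] */
	raw_spinlock_t *lb = ATOMIC_HASH(&b);	/* &__atomic_hash[1] */
#endif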

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
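
/* Illustrative sketch (not part of the original header): every
 * read-modify-write in this file is built on the same pattern - hash the
 * object's address to a lock, disable interrupts, do the plain C update,
 * unlock.  'example_and' is a hypothetical operation, shown only for shape.
 */
#if 0
static __inline__ int example_and(int mask, atomic_t *v)
{
	int ret;
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);	/* lock keyed on v's address */
	ret = (v->counter &= mask);		/* ordinary C, now race-free */
	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}
#endif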

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);

/* __xchg8/32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef __LP64__
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
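
/* Illustrative sketch (not part of the original header): the switch above
 * handles only sizes 1, 4 and (on 64-bit) 8.  Any other size leaves the
 * call to __xchg_called_with_bad_pointer() in place, and since no
 * definition of that function exists anywhere, the bad caller dies at
 * link time instead of silently corrupting adjacent bytes:
 */
#if 0
	unsigned short s = 0;
	/* size 2 matches no case: undefined reference to
	 * `__xchg_called_with_bad_pointer' at link time */
	__xchg(1, &s, sizeof(s));
#endif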

/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could __LP64__ code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
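
/* Illustrative sketch (not part of the original header): xchg() returns
 * the old value, so it can hand exclusive ownership of a pointer to one
 * caller.  'pending' is a hypothetical variable used only for this example.
 */
#if 0
	static void *pending;

	void *work = xchg(&pending, NULL);	/* atomically take and clear */
	if (work)
		;	/* only this caller saw the old value */
#endif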

#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
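
/* Illustrative sketch (not part of the original header): the canonical
 * compare-and-swap retry loop, here clamping a counter to a hypothetical
 * limit.  cmpxchg() returns the value it found, so the update succeeded
 * exactly when that equals the value we started from.
 */
#if 0
static int saturating_inc(int *p, int limit)
{
	int old, seen;

	do {
		old = *p;
		if (old >= limit)
			return old;		/* already saturated */
		seen = cmpxchg(p, old, old + 1);
	} while (seen != old);			/* lost a race: retry */
	return old + 1;
}
#endif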

/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile int counter; } atomic_t;

/* It's possible to reduce all atomic operations to __atomic_add_return,
 * atomic_set and atomic_read (the latter is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
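
/* Illustrative sketch (not part of the original header): atomic_xchg() as
 * a test-and-set flag - only one caller can move it from 0 to 1.
 * 'in_progress' is a hypothetical variable used only for this example.
 */
#if 0
	static atomic_t in_progress = ATOMIC_INIT(0);

	if (atomic_xchg(&in_progress, 1) == 0) {
		/* we won: do the one-time work, then release */
		atomic_set(&in_progress, 0);
	}
#endif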

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
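
/* Illustrative sketch (not part of the original header): the classic use
 * of atomic_inc_not_zero() - taking a reference only while the object is
 * still live.  'struct obj' is hypothetical.
 */
#if 0
struct obj {
	atomic_t refcount;
};

static struct obj *obj_get(struct obj *o)
{
	/* Once the count has hit zero the object is being torn down and
	 * must not be resurrected, so the increment is refused. */
	return atomic_inc_not_zero(&o->refcount) ? o : NULL;
}
#endif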

#define atomic_add(i,v)		((void)(__atomic_add_return( ((int)(i)),(v))))
#define atomic_sub(i,v)		((void)(__atomic_add_return(-((int)(i)),(v))))
#define atomic_inc(v)		((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)		((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( ((int)(i)),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-((int)(i)),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	((atomic_t) { (i) })
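
/* Illustrative sketch (not part of the original header): the companion of
 * the obj_get() sketch above - atomic_dec_and_test() returns true for
 * exactly one caller, which therefore frees the object exactly once.
 */
#if 0
static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		kfree(o);
}
#endif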

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifdef __LP64__

typedef struct { volatile s64 counter; } atomic64_t;

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return v->counter;
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

#endif /* __LP64__ */
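
/* Illustrative sketch (not part of the original header): a wide event
 * counter that would wrap an atomic_t.  'bytes_seen' is hypothetical.
 */
#if 0
	static atomic64_t bytes_seen = ATOMIC64_INIT(0);

	atomic64_add(4096, &bytes_seen);
	if (atomic64_read(&bytes_seen) > ((s64)1 << 40))
		;	/* more than a terabyte accounted */
#endif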

#include <asm-generic/atomic.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */