@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifdef CONFIG_PARAVIRT
-/*
- * Define virtualization-friendly old-style lock byte lock, for use in
- * pv_lock_ops if desired.
- *
- * This differs from the pre-2.6.24 spinlock by always using xchgb
- * rather than decb to take the lock; this allows it to use a
- * zero-initialized lock structure.  It also maintains a 1-byte
- * contention counter, so that we can implement
- * __byte_spin_is_contended.
- */
-struct __byte_spinlock {
-	s8 lock;
-	s8 spinners;
-};
-
-static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	return bl->lock != 0;
-}
-
-static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	return bl->spinners != 0;
-}
-
-static inline void __byte_spin_lock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	s8 val = 1;
-
-	asm("1: xchgb %1, %0\n"
-	    "   test %1,%1\n"
-	    "   jz 3f\n"
-	    "   " LOCK_PREFIX "incb %2\n"
-	    "2: rep;nop\n"
-	    "   cmpb $1, %0\n"
-	    "   je 2b\n"
-	    "   " LOCK_PREFIX "decb %2\n"
-	    "   jmp 1b\n"
-	    "3:"
-	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
-}
-
-static inline int __byte_spin_trylock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	u8 old = 1;
-
-	asm("xchgb %1,%0"
-	    : "+m" (bl->lock), "+q" (old) : : "memory");
-
-	return old == 0;
-}
-
-static inline void __byte_spin_unlock(raw_spinlock_t *lock)
-{
-	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-	smp_wmb();
-	bl->lock = 0;
-}
-#else /* !CONFIG_PARAVIRT */
+#ifndef CONFIG_PARAVIRT
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
@@ -267,7 +205,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 	__raw_spin_lock(lock);
 }
 
-#endif /* CONFIG_PARAVIRT */
+#endif
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
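
For readers skimming this removal: the deleted byte lock takes the lock by atomically exchanging 1 into a lock byte (acquisition succeeds only if the old value was 0) and maintains a one-byte spinners counter so __byte_spin_is_contended can report waiters. The sketch below is not part of the patch; it is a minimal userspace C rendering of the same algorithm, assuming GCC/Clang __atomic builtins and the x86-only __builtin_ia32_pause() in place of the kernel's xchgb/LOCK_PREFIX inline assembly and smp_wmb(), with made-up byte_spin_* names.

#include <stdint.h>

struct byte_spinlock {
	int8_t lock;     /* 0 = free, 1 = held */
	int8_t spinners; /* 1-byte contention counter */
};

static inline void byte_spin_lock(struct byte_spinlock *bl)
{
	/* Atomic exchange stands in for xchgb: old value 0 means we got it. */
	while (__atomic_exchange_n(&bl->lock, 1, __ATOMIC_ACQUIRE) != 0) {
		/* Lost the race: advertise contention (the "lock incb %2"). */
		__atomic_fetch_add(&bl->spinners, 1, __ATOMIC_RELAXED);
		/* Spin read-only until the holder drops the lock (rep;nop loop). */
		while (__atomic_load_n(&bl->lock, __ATOMIC_RELAXED) != 0)
			__builtin_ia32_pause();
		__atomic_fetch_sub(&bl->spinners, 1, __ATOMIC_RELAXED);
	}
}

static inline int byte_spin_trylock(struct byte_spinlock *bl)
{
	/* One exchange, no loop: succeed only if the lock byte was 0. */
	return __atomic_exchange_n(&bl->lock, 1, __ATOMIC_ACQUIRE) == 0;
}

static inline void byte_spin_unlock(struct byte_spinlock *bl)
{
	/* Release store plays the role of smp_wmb() followed by a plain store. */
	__atomic_store_n(&bl->lock, 0, __ATOMIC_RELEASE);
}

static inline int byte_spin_is_contended(struct byte_spinlock *bl)
{
	return __atomic_load_n(&bl->spinners, __ATOMIC_RELAXED) != 0;
}

Because a zero-filled byte_spinlock is the unlocked state, this variant (like the xchgb version above) needs no special initializer, which is exactly the property the original comment calls out versus the pre-2.6.24 decb-based lock.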