@@ -55,11 +55,9 @@
  * much between them in performance though, especially as locks are out of line.
  */
 #if (NR_CPUS < 256)
-#define TICKET_SHIFT 8
-
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
-	short inc = 0x0100;
+	unsigned short inc = 1 << TICKET_SHIFT;
 
 	asm volatile (
 		LOCK_PREFIX "xaddw %w0, %1\n"
@@ -78,7 +76,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	int tmp, new;
+	unsigned int tmp, new;
 
 	asm volatile("movzwl %2, %0\n\t"
 		     "cmpb %h0,%b0\n\t"
@@ -103,12 +101,10 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 		     : "memory", "cc");
 }
 #else
-#define TICKET_SHIFT 16
-
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
-	int inc = 0x00010000;
-	int tmp;
+	unsigned inc = 1 << TICKET_SHIFT;
+	unsigned tmp;
 
 	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
 		     "movzwl %w0, %2\n\t"
@@ -128,8 +124,8 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	int tmp;
-	int new;
+	unsigned tmp;
+	unsigned new;
 
 	asm volatile("movl %2,%0\n\t"
 		     "movl %0,%1\n\t"
@@ -159,16 +155,16 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
-	int tmp = ACCESS_ONCE(lock->slock);
+	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
+	return !!(tmp.tail ^ tmp.head);
 }
 
 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
-	int tmp = ACCESS_ONCE(lock->slock);
+	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
+	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
 }
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
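
The rewritten accessors above stop open-coding shifts and masks and instead rely on a ticket layout kept in arch/x86/include/asm/spinlock_types.h. The sketch below illustrates the kind of layout they assume; it is not the verbatim kernel header, and the exact spelling of TICKET_SHIFT/TICKET_MASK and the type guards in the series may differ.

	/*
	 * Illustrative sketch only: the layout that
	 * __ticket_spin_is_locked()/__ticket_spin_is_contended() expect.
	 * The ticket is 8 bits when NR_CPUS < 256 and 16 bits otherwise,
	 * so a single xadd can take a ticket and advance the tail.
	 */
	#if (CONFIG_NR_CPUS < 256)
	typedef unsigned char  __ticket_t;	/* u8 in the kernel */
	#else
	typedef unsigned short __ticket_t;	/* u16 in the kernel */
	#endif

	#define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
	#define TICKET_MASK	((1 << TICKET_SHIFT) - 1)

	typedef struct arch_spinlock {
		union {
			unsigned int slock;		/* whole word, as used by the asm paths */
			struct __raw_tickets {
				__ticket_t head;	/* low half: ticket now being served */
				__ticket_t tail;	/* high half: next ticket to hand out */
			} tickets;
		};
	} arch_spinlock_t;

With such a layout, head == tail means the lock is free, head != tail means it is held, and (tail - head) & TICKET_MASK > 1 means at least one further CPU is queued behind the owner, which is exactly what the new __ticket_spin_is_locked() and __ticket_spin_is_contended() bodies test.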