
ARM: 7854/1: lockref: add support for lockless lockrefs using cmpxchg64

Our spinlocks are only 32-bit (2x16-bit tickets) and, on processors
with 64-bit atomic instructions, cmpxchg64 makes use of the double-word
exclusive accessors.

This patch wires up the cmpxchg-based lockless lockref implementation
for ARM.
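
For context, the 32-bit lock size matters because the lockless lockref packs the spinlock and the reference count into a single 64-bit word, so one cmpxchg64 can bump the count while confirming the lock is free, without ever taking it. The following user-space sketch is a simplified model of the fast path in lib/lockref.c, not the kernel code; names such as struct lockref_sketch, ticket_unlocked and lockref_get_fast are illustrative only.

#include <stdint.h>
#include <stdbool.h>

struct ticket_lock {                        /* models arch_spinlock_t: two 16-bit tickets */
	uint16_t owner;
	uint16_t next;
};

struct lockref_sketch {
	union {
		uint64_t lock_count;        /* the whole 64-bit word handed to cmpxchg64 */
		struct {
			struct ticket_lock lock;    /* 32 bits */
			uint32_t count;             /* 32 bits */
		};
	};
};

static bool ticket_unlocked(struct ticket_lock l)   /* mirrors arch_spin_value_unlocked() */
{
	return l.owner == l.next;
}

/*
 * Fast-path reference bump: succeeds only while the embedded lock is free,
 * using a single 64-bit compare-and-swap on the combined word.
 */
static bool lockref_get_fast(struct lockref_sketch *lr)
{
	struct lockref_sketch old, new;

	old.lock_count = __atomic_load_n(&lr->lock_count, __ATOMIC_RELAXED);
	while (ticket_unlocked(old.lock)) {
		new = old;
		new.count++;
		/* On ARM this is where cmpxchg64, i.e. the double-word exclusives, comes in. */
		if (__atomic_compare_exchange_n(&lr->lock_count, &old.lock_count,
						new.lock_count, false,
						__ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
			return true;
		/* Failure reloads old.lock_count with the current value; retry. */
	}
	return false;        /* lock is held: the real code falls back to taking it */
}

int main(void)
{
	struct lockref_sketch lr = { .count = 1 };   /* unlocked, refcount 1 */

	return lockref_get_fast(&lr) ? 0 : 1;        /* succeeds: count becomes 2 */
}

When the lock is held (or the cmpxchg keeps failing), the generic code falls back to taking the spinlock, which is why the architecture only needs to provide arch_spin_value_unlocked() and select ARCH_USE_CMPXCHG_LOCKREF.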

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
commit 0cbad9c9df
Author: Will Deacon
 arch/arm/Kconfig                | 1 +
 arch/arm/include/asm/spinlock.h | 8 ++++++--
 2 files changed, 7 insertions(+), 2 deletions(-)

--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig

@@ -5,6 +5,7 @@ config ARM
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT if MMU
 	select CLONE_BACKWARDS

--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h

@@ -127,10 +127,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	dsb_sev();
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.owner == lock.tickets.next;
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-	return tickets.owner != tickets.next;
+	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
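
Note that arch_spin_value_unlocked() deliberately takes the lock by value: the lockref code tests the copy of the lock it already loaded as one half of the 64-bit word, rather than re-reading it through a pointer, and arch_spin_is_locked() is simply re-expressed in terms of the same helper. The "double-word exclusive accessors" referred to above are the LDREXD/STREXD instruction pair; as a rough illustration, here is a cmpxchg64-style operation built on them. This is only a sketch loosely modeled on the kernel's 64-bit atomic helpers in arch/arm/include/asm/atomic.h, with the memory barriers omitted and the function name made up here.

#include <stdint.h>

/* Compare *ptr against old and, if equal, store new; returns the value
 * observed at *ptr.  The exclusive store fails (res != 0) if another CPU
 * touched the location, in which case we retry. */
static inline uint64_t cmpxchg64_sketch(volatile uint64_t *ptr,
					uint64_t old, uint64_t new)
{
	uint64_t oldval;
	unsigned long res;

	do {
		__asm__ __volatile__(
		"ldrexd		%1, %H1, [%3]\n"	/* exclusive load of the 64-bit word */
		"mov		%0, #0\n"
		"teq		%1, %4\n"		/* compare low halves */
		"teqeq		%H1, %H4\n"		/* ... and high halves */
		"strexdeq	%0, %5, %H5, [%3]"	/* store new only if they matched */
		: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
		: "r" (ptr), "r" (old), "r" (new)
		: "cc");
	} while (res);					/* retry if the exclusive store failed */

	return oldval;
}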