@@ -59,18 +59,13 @@ static inline void dsb_sev(void)
 }
 
 /*
- * ARMv6 Spin-locking.
+ * ARMv6 ticket-based spin-locking.
  *
- * We exclusively read the old value. If it is zero, we may have
- * won the lock, so we try exclusively storing it. A memory barrier
- * is required after we get a lock, and before we release it, because
- * V6 CPUs are assumed to have weakly ordered memory.
- *
- * Unlocked value: 0
- * Locked value: 1
+ * A memory barrier is required after we get a lock, and before we
+ * release it, because V6 CPUs are assumed to have weakly ordered
+ * memory.
  */
 
-#define arch_spin_is_locked(x)	((x)->lock != 0)
 #define arch_spin_unlock_wait(lock) \
 	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
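The new code below refers to lock->slock, lock->tickets.owner, lock->tickets.next and TICKET_SHIFT, which come from asm/spinlock_types.h and are not part of this hunk. As a rough sketch of the layout the patch appears to rely on (the exact definition lives in the spinlock_types.h change, not quoted here): the 32-bit slock word is overlaid with two 16-bit ticket counters, owner in the low half and next in the high half on little-endian.

#define TICKET_SHIFT	16

typedef struct {
	union {
		u32 slock;			/* whole lock word, for ldrex/strex */
		struct __raw_tickets {
#ifdef __ARMEB__
			u16 next;		/* big-endian: high halfword first */
			u16 owner;
#else
			u16 owner;		/* ticket currently being served */
			u16 next;		/* next ticket to hand out */
#endif
		} tickets;
	};
} arch_spinlock_t;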
@@ -79,31 +74,39 @@ static inline void dsb_sev(void)
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 newval;
+	arch_spinlock_t lockval;
 
 	__asm__ __volatile__(
-"1:	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-	WFE("ne")
-"	strexeq	%0, %2, [%1]\n"
-"	teqeq	%0, #0\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%1, %0, %4\n"
+"	strex	%2, %1, [%3]\n"
+"	teq	%2, #0\n"
 "	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
+	while (lockval.tickets.next != lockval.tickets.owner) {
+		wfe();
+		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+	}
+
 	smp_mb();
 }
 
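The assembly takes a ticket with an atomic fetch-and-add on the next field (the ldrex/add/strex loop), then spins until owner catches up, napping in wfe() and relying on the sev() in the unlock path to wake it. Stripped of the ARM specifics, the algorithm looks roughly like the portable C11 sketch below (illustrative only, not the kernel code; struct ticket_lock and the omission of wfe()/cpu_relax() are simplifications introduced here):

#include <stdatomic.h>
#include <stdint.h>

struct ticket_lock {
	_Atomic uint16_t next;		/* next ticket to hand out */
	_Atomic uint16_t owner;		/* ticket currently allowed in */
};

static void ticket_lock(struct ticket_lock *lock)
{
	/* take a ticket; this is what the ldrex/add/strex loop does
	 * atomically on the combined 32-bit lock word */
	uint16_t ticket = atomic_fetch_add(&lock->next, 1);

	/* spin until it is our turn; the acquire load stands in for the
	 * smp_mb() above, and the kernel version sleeps in wfe() instead
	 * of busy-waiting */
	while (atomic_load_explicit(&lock->owner, memory_order_acquire) != ticket)
		;	/* busy-wait */
}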
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp;
+	u32 slock;
 
 	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&lock->lock), "r" (1)
+"	ldrex	%0, [%2]\n"
+"	subs	%1, %0, %0, ror #16\n"
+"	addeq	%0, %0, %3\n"
+"	strexeq	%1, %0, [%2]"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 
 	if (tmp == 0) {
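In the trylock, subs %1, %0, %0, ror #16 subtracts a halfword-rotated copy of the lock word from itself; the result is zero exactly when the two halves (next and owner) are equal, i.e. when the lock is free, and only then do addeq/strexeq take a ticket. Continuing the portable sketch above (again illustrative only; the real code reads both halves with a single ldrex, so it cannot observe them out of sync the way two separate loads can):

static int ticket_trylock(struct ticket_lock *lock)
{
	uint16_t owner = atomic_load(&lock->owner);
	uint16_t next = atomic_load(&lock->next);

	/* lock is busy: fail immediately rather than queueing */
	if (next != owner)
		return 0;

	/* try to grab the next ticket; a failed compare-exchange means
	 * another CPU raced us, just as a failed strexeq would */
	return atomic_compare_exchange_strong(&lock->next, &next,
					      (uint16_t)(next + 1));
}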
@@ -116,17 +119,38 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	unsigned long tmp;
+	u32 slock;
+
 	smp_mb();
 
 	__asm__ __volatile__(
-"	str	%1, [%0]\n"
-	:
-	: "r" (&lock->lock), "r" (0)
+"	mov	%1, #1\n"
+"1:	ldrex	%0, [%2]\n"
+"	uadd16	%0, %0, %1\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (slock), "=&r" (tmp)
+	: "r" (&lock->slock)
 	: "cc");
 
 	dsb_sev();
 }
 
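The release path bumps the owner field. uadd16 performs two independent halfword additions, so adding the constant 1 increments owner without any carry spilling into next, even when owner wraps from 0xffff back to 0. Since only the lock holder ever writes owner, the portable sketch reduces to a single increment (again a simplification, not the kernel code):

static void ticket_unlock(struct ticket_lock *lock)
{
	/* the release ordering stands in for the smp_mb() before the
	 * store in the kernel version; dsb_sev() then wakes the wfe()
	 * waiters, which the sketch leaves out */
	atomic_fetch_add_explicit(&lock->owner, 1, memory_order_release);
}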
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return tickets.owner != tickets.next;
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+	return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended	arch_spin_is_contended
+
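To make the two helpers above concrete, a few hypothetical ticket values:

	owner == 3, next == 3  ->  unlocked: is_locked() == 0, is_contended() == 0
	owner == 3, next == 4  ->  held, nobody waiting: is_locked() == 1, is_contended() == 0
	owner == 3, next == 6  ->  held with two CPUs queued: is_locked() == 1, is_contended() == 1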
 /*
  * RWLOCKS
  *