@@ -1,6 +1,22 @@
 #ifndef _X86_SPINLOCK_H_
 #define _X86_SPINLOCK_H_
 
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ *
+ * Simple spin lock operations. There are two variants: one clears IRQs
+ * on the local processor, the other does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ *
+ * (the type definitions are in asm/spinlock_types.h)
+ */
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -11,9 +27,200 @@
 #endif /* CONFIG_PARAVIRT */
 
 #ifdef CONFIG_X86_32
-# include "spinlock_32.h"
+typedef char _slock_t;
+# define LOCK_INS_DEC "decb"
+# define LOCK_INS_XCH "xchgb"
+# define LOCK_INS_MOV "movb"
+# define LOCK_INS_CMP "cmpb"
+# define LOCK_PTR_REG "a"
 #else
-# include "spinlock_64.h"
+typedef int _slock_t;
+# define LOCK_INS_DEC "decl"
+# define LOCK_INS_XCH "xchgl"
+# define LOCK_INS_MOV "movl"
+# define LOCK_INS_CMP "cmpl"
+# define LOCK_PTR_REG "D"
+#endif
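+
+/*
+ * The 32-bit variant operates on the lock as a byte, the 64-bit one
+ * as a 32-bit word; hence the b/l instruction suffixes above.
+ * LOCK_PTR_REG names the register in which the out-of-line rwlock
+ * slow paths (__read_lock_failed/__write_lock_failed) expect the
+ * lock pointer: "a" (%eax) on 32-bit, "D" (%rdi) on 64-bit.
+ */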
+
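+/*
+ * Lock value protocol: a positive value (1) means unlocked; zero or
+ * a negative value means locked.  The lock path below drives the
+ * value negative while waiting, which is why "<= 0" reads as locked.
+ */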
+static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+{
+	return *(volatile _slock_t *)(&(lock)->slock) <= 0;
+}
+
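+/*
+ * The locked decrement at 1: attempts the acquire; if the result is
+ * still non-negative (the lock was 1, now 0) we own it and jump to 3:.
+ * Otherwise we spin at 2: with rep;nop (the PAUSE hint), re-reading
+ * with a plain compare until the value turns positive, then retry the
+ * atomic decrement at 1:.
+ */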
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; " LOCK_INS_DEC " %0\n\t"
+		"jns 3f\n"
+		"2:\t"
+		"rep;nop\n\t"
+		LOCK_INS_CMP " $0,%0\n\t"
+		"jle 2b\n\t"
+		"jmp 1b\n"
+		"3:\n\t"
+		: "+m" (lock->slock) : : "memory");
+}
+
+/*
+ * It is easier for the lock validator if interrupts are not re-enabled
+ * in the middle of a lock-acquire. This is a performance feature anyway
+ * so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
+ */
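+/*
+ * The "testl $0x200, %[flags]" below checks the IF bit (X86_EFLAGS_IF)
+ * in the caller's saved flags: if interrupts were on before the lock
+ * attempt, they are re-enabled (STI_STRING) while spinning and switched
+ * off again (CLI_STRING) before each retry of the acquire.
+ */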
+#ifndef CONFIG_PROVE_LOCKING
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+					 unsigned long flags)
+{
+	asm volatile(
+		"\n1:\t"
+		LOCK_PREFIX " ; " LOCK_INS_DEC " %[slock]\n\t"
+		"jns 5f\n"
+		"testl $0x200, %[flags]\n\t"
+		"jz 4f\n\t"
+		STI_STRING "\n"
+		"3:\t"
+		"rep;nop\n\t"
+		LOCK_INS_CMP " $0, %[slock]\n\t"
+		"jle 3b\n\t"
+		CLI_STRING "\n\t"
+		"jmp 1b\n"
+		"4:\t"
+		"rep;nop\n\t"
+		LOCK_INS_CMP " $0, %[slock]\n\t"
+		"jg 1b\n\t"
+		"jmp 4b\n"
+		"5:\n\t"
+		: [slock] "+m" (lock->slock)
+		: [flags] "r" ((u32)flags)
+		  CLI_STI_INPUT_ARGS
+		: "memory" CLI_STI_CLOBBERS);
+}
+#endif
+
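+/*
+ * __raw_spin_trylock: atomically exchange 0 into the lock.  A positive
+ * old value means the lock was free and is now ours; anything else
+ * means it was already held, and since 0 is still a "locked" value,
+ * nothing needs to be undone on failure.
+ */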
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+	_slock_t oldval;
+
+	asm volatile(
+		LOCK_INS_XCH " %0,%1"
+		:"=q" (oldval), "+m" (lock->slock)
+		:"0" (0) : "memory");
+
+	return oldval > 0;
+}
+
+/*
+ * __raw_spin_unlock is based on writing $1 to the low byte.
+ * This method works despite all the confusion, except on PPro SMP
+ * (or if we are using OOSTORE), where we must use xchgb instead
+ * (PPro errata 66, 92).
+ */
+#if defined(CONFIG_X86_64) || \
+	(!defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE))
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	asm volatile(LOCK_INS_MOV " $1,%0" : "=m" (lock->slock) :: "memory");
+}
+
+#else
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	unsigned char oldval = 1;
+
+	asm volatile("xchgb %b0, %1"
+		     : "=q" (oldval), "+m" (lock->slock)
+		     : "0" (oldval) : "memory");
+}
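+
+/*
+ * Note that xchg carries an implied LOCK prefix, so this variant is a
+ * fully serializing store - which is what the PPro-errata and OOSTORE
+ * configurations require of unlock.
+ */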
+
 #endif
 
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	while (__raw_spin_is_locked(lock))
+		cpu_relax();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get an
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ *
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ */
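+
+/*
+ * Concretely: the counter starts at RW_LOCK_BIAS (0x01000000).  Each
+ * reader subtracts 1 and each writer subtracts the whole bias, so the
+ * value is RW_LOCK_BIAS when the lock is free, between 1 and
+ * RW_LOCK_BIAS-1 while readers hold it, and 0 while a writer holds it;
+ * negative values are the transient "contended" states that the slow
+ * paths sort out.
+ */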
+
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+{
+	return (int)(lock)->lock > 0;
+}
+
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+{
+	return (lock)->lock == RW_LOCK_BIAS;
+}
+
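+/*
+ * Only the uncontended fast path (one locked subl plus a conditional
+ * branch) is inlined below; on contention we call the out-of-line
+ * helpers __read_lock_failed/__write_lock_failed, which back out the
+ * count change and spin until the lock becomes available.
+ */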
+static inline void __raw_read_lock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+		     "jns 1f\n"
+		     "call __read_lock_failed\n\t"
+		     "1:\n"
+		     ::LOCK_PTR_REG (rw) : "memory");
+}
+
+static inline void __raw_write_lock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
+		     "jz 1f\n"
+		     "call __write_lock_failed\n\t"
+		     "1:\n"
+		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
+}
+
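+/*
+ * The trylock variants are not precise: the decrement and the re-read
+ * in __raw_read_trylock are two separate atomic operations, so it can
+ * fail spuriously when racing with other lockers.  That is fine -
+ * trylock callers must cope with failure anyway.
+ */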
+static inline int __raw_read_trylock(raw_rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+
+	atomic_dec(count);
+	if (atomic_read(count) >= 0)
+		return 1;
+	atomic_inc(count);
+	return 0;
+}
+
+static inline int __raw_write_trylock(raw_rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+
+	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+		return 1;
+	atomic_add(RW_LOCK_BIAS, count);
+	return 0;
+}
+
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
+}
+
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
+{
+	asm volatile(LOCK_PREFIX "addl %1, %0"
+		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
+}
+
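+/*
+ * The relax hooks map to cpu_relax(), i.e. the same rep;nop/PAUSE
+ * hint used in the spin loops above, telling the CPU (and any SMT
+ * sibling) that we are busy-waiting.
+ */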
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
 #endif