@@ -5,21 +5,13 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#include <asm/processor.h>
+#include <linux/prefetch.h>
 
 /*
  * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
  * extensions, so when running on UP, we have to patch these instructions away.
  */
-#define ALT_SMP(smp, up)					\
-	"9998:	" smp "\n"					\
-	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
-	"	.long	9998b\n"				\
-	"	" up "\n"					\
-	"	.popsection\n"
-
 #ifdef CONFIG_THUMB2_KERNEL
-#define SEV		ALT_SMP("sev.w", "nop.w")
 /*
  * For Thumb-2, special care is needed to ensure that the conditional WFE
  * instruction really does assemble to exactly 4 bytes (as required by
@@ -31,17 +23,18 @@
  * the assembler won't change IT instructions which are explicitly present
  * in the input.
  */
-#define WFE(cond)	ALT_SMP(		\
+#define WFE(cond)	__ALT_SMP_ASM(		\
 	"it " cond "\n\t"			\
 	"wfe" cond ".n",			\
 						\
 	"nop.w"					\
 )
 #else
-#define SEV		ALT_SMP("sev", "nop")
-#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
+#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
 #endif
 
+#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
@@ -77,6 +70,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	u32 newval;
 	arch_spinlock_t lockval;
 
+	prefetchw(&lock->slock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%3]\n"
 "	add	%1, %0, %4\n"
@@ -100,6 +94,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	unsigned long contended, res;
 	u32 slock;
 
+	prefetchw(&lock->slock);
 	do {
 		__asm__ __volatile__(
 		"	ldrex	%0, [%3]\n"
@@ -152,6 +147,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp;
 
+	prefetchw(&rw->lock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
@@ -170,6 +166,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned long contended, res;
 
+	prefetchw(&rw->lock);
 	do {
 		__asm__ __volatile__(
 		"	ldrex	%0, [%2]\n"
@@ -203,7 +200,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)		((x)->lock == 0)
+#define arch_write_can_lock(x)		(ACCESS_ONCE((x)->lock) == 0)
 
 /*
  * Read locks are a bit more hairy:
@@ -221,6 +218,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
+	prefetchw(&rw->lock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
@@ -241,6 +239,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 	smp_mb();
 
+	prefetchw(&rw->lock);
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, #1\n"
@@ -259,6 +258,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned long contended, res;
 
+	prefetchw(&rw->lock);
 	do {
 		__asm__ __volatile__(
 		"	ldrex	%0, [%2]\n"
@@ -280,7 +280,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)		(ACCESS_ONCE((x)->lock) < 0x80000000)
 
 #define arch_read_lock_flags(lock, flags)  arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
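
A note for context (not part of the patch above): the local ALT_SMP() string-pasting helper removed in the first hunk is superseded by a shared __ALT_SMP_ASM() macro, and SEV is now built with WASM() so one definition covers both ARM and Thumb-2 kernels. Judging from the deleted lines, the shared helpers presumably expand roughly as follows; this is a sketch of assumed definitions, not text taken from the patch:

/* Sketch only -- assumed shape of the shared helpers, inferred from the
 * removed ALT_SMP() body and the old "sev.w"/"sev" pair. */
#define __ALT_SMP_ASM(smp, up)					\
	"9998:	" smp "\n"					\
	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
	"	.long	9998b\n"				\
	"	" up "\n"					\
	"	.popsection\n"

#ifdef CONFIG_THUMB2_KERNEL
#define WASM(instr)	#instr ".w"	/* force the 32-bit encoding */
#else
#define WASM(instr)	#instr
#endif

With the width suffix handled where the mnemonic is stringified, the two separate Thumb-2/ARM SEV definitions can be dropped, which is what the second hunk does.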
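
A second note (also not part of the patch): each added prefetchw() hints that the lock word should be brought into the cache in a writable state before the ldrex/strex sequence runs, so the strex is less likely to fail and force another trip around the loop. Outside the kernel, roughly the same idea can be expressed with GCC's write-prefetch builtin; the stand-alone ticket-lock acquire below is a sketch with hypothetical names that mirrors the shape of arch_spin_lock() above, not kernel code:

#include <stdatomic.h>

struct ticket_lock {
	atomic_uint next;	/* next ticket to hand out */
	atomic_uint owner;	/* ticket currently being served */
};

static void ticket_lock_acquire(struct ticket_lock *lock)
{
	/* Ask for the cache line in a writable state before the atomic
	 * read-modify-write, analogous to prefetchw(&lock->slock). */
	__builtin_prefetch(&lock->next, 1);

	unsigned int ticket = atomic_fetch_add_explicit(&lock->next, 1,
							memory_order_relaxed);
	while (atomic_load_explicit(&lock->owner,
				    memory_order_acquire) != ticket)
		;	/* spin until our ticket comes up */
}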