@@ -290,7 +290,6 @@ do { \
 })
 
 #ifdef CONFIG_SMP
-#error SMP not supported
 
 #define smp_mb() mb()
 #define smp_rmb() rmb()
@@ -304,6 +303,8 @@ do { \
 #define smp_wmb() barrier()
 #define smp_read_barrier_depends() do { } while(0)
 
+#endif /* CONFIG_SMP */
+
 #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
 /*
  * On the StrongARM, "swp" is terminally broken since it bypasses the
@@ -316,9 +317,16 @@ do { \
  *
  * We choose (1) since its the "easiest" to achieve here and is not
  * dependent on the processor type.
+ *
+ * NOTE that this solution won't work on an SMP system, so explicitly
+ * forbid it here.
  */
+#ifdef CONFIG_SMP
+#error SMP is not supported on SA1100/SA110
+#else
 #define swp_is_buggy
 #endif
+#endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 {
@@ -361,8 +369,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	return ret;
 }
 
-#endif /* CONFIG_SMP */
-
 #endif /* __ASSEMBLY__ */
 
 #define arch_align_stack(x) (x)
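
For anyone reading along, here is a minimal stand-alone C sketch (not kernel code) of the conditional structure the patch leaves behind. The CONFIG_SMP / CONFIG_CPU_SA1100 / CONFIG_CPU_SA110 symbols and the #error are taken from the patch; the __sync builtins, xchg_sketch() and main() are purely illustrative, and describing the swp workaround as an interrupt-disable emulation reflects the existing __xchg() code rather than anything shown in these hunks.

/*
 * Stand-alone sketch of the #ifdef structure after this patch.  Build with
 * the config symbols supplied on the command line, e.g.:
 *
 *   cc -DCONFIG_SMP -DCONFIG_CPU_SA1100 sketch.c    # stops at the #error
 *   cc -DCONFIG_CPU_SA1100 sketch.c && ./a.out      # UP StrongARM workaround
 *   cc -DCONFIG_SMP sketch.c && ./a.out             # SMP, swp assumed usable
 */
#include <stdio.h>

#ifdef CONFIG_SMP
#define smp_mb()	__sync_synchronize()	/* stand-in for the real mb() */
#else
#define smp_mb()	((void)0)		/* the kernel uses barrier() here */
#endif /* CONFIG_SMP */
/* The barrier block now ends here, so everything below is built on SMP too. */

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * swp bypasses the cache on these CPUs; the workaround relies on disabling
 * interrupts, which only excludes the local CPU, so it cannot work on SMP.
 */
#ifdef CONFIG_SMP
#error SMP is not supported on SA1100/SA110
#else
#define swp_is_buggy
#endif
#endif

static unsigned long xchg_sketch(volatile unsigned long *ptr, unsigned long x)
{
	unsigned long ret;

#ifdef swp_is_buggy
	/*
	 * UP StrongARM path: a plain load/store pair, which the kernel wraps
	 * in local_irq_save()/local_irq_restore() to make it atomic against
	 * interrupts on the one and only CPU.
	 */
	ret = *ptr;
	*ptr = x;
#else
	/* Elsewhere an atomic exchange primitive is used (swp in the file
	 * this patch touches); model it with a GCC builtin here. */
	ret = __sync_lock_test_and_set(ptr, x);
#endif
	return ret;
}

int main(void)
{
	volatile unsigned long v = 1;
	unsigned long old;

	smp_mb();
	old = xchg_sketch(&v, 2);
	printf("xchg returned %lu, value is now %lu\n", old, (unsigned long)v);
	return 0;
}

The point of the restructuring is visible in the sketch: __xchg() is no longer confined to the UP half of the CONFIG_SMP conditional, and the only configuration rejected at compile time is SMP combined with the SA1100/SA110 swp workaround.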