@@ -67,8 +67,17 @@ static inline void sched_cacheflush(void)
 {
 }
 
-#define nop()		__asm__ __volatile__ ("nop")
-
+#ifdef CONFIG_CPU_SH4A
+#define __icbi()			\
+{					\
+	unsigned long __addr;		\
+	__addr = 0xa8000000;		\
+	__asm__ __volatile__(		\
+		"icbi %0\n\t"		\
+		: /* no output */	\
+		: "m" (__m(__addr)));	\
+}
+#endif
 
 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
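Not part of the patch, but for reference: icbi invalidates the instruction
cache block containing the given effective address, and on SH-4A executing it
is what serializes a preceding control register write (see the comment added
in the next hunk). A minimal open-coded sketch of what the macro boils down
to; 0xa8000000 is just an arbitrary valid P2 (uncached segment) address for
the instruction to operate on, and the "r"-constraint form here is assumed
equivalent to the macro's "m" form:

	/* Sketch only: open-coded __icbi() on an SH-4A target. */
	unsigned long __addr = 0xa8000000;	/* arbitrary P2 address */
	__asm__ __volatile__("icbi @%0" : : "r" (__addr));
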
@@ -84,15 +93,31 @@ static __inline__ unsigned long tas(volatile int *m)
 
 extern void __xchg_called_with_bad_pointer(void);
 
+/*
+ * A brief note on ctrl_barrier(), the control register write barrier.
+ *
+ * Legacy SH cores typically require a sequence of 8 nops after
+ * modification of a control register in order for the changes to take
+ * effect. On newer cores (like the sh4a and sh5) this is accomplished
+ * with icbi.
+ *
+ * Also note that on sh4a in the icbi case we can forego a synco for the
+ * write barrier, as it's not necessary for control registers.
+ *
+ * Historically we have only done this type of barrier for the MMUCR, but
+ * it's also necessary for the CCR, so we make it generic here instead.
+ */
 #ifdef CONFIG_CPU_SH4A
-#define mb()	__asm__ __volatile__ ("synco": : :"memory")
-#define rmb()	mb()
-#define wmb()	__asm__ __volatile__ ("synco": : :"memory")
+#define mb()		__asm__ __volatile__ ("synco": : :"memory")
+#define rmb()		mb()
+#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
+#define ctrl_barrier()	__icbi()
 #define read_barrier_depends()	do { } while(0)
 #else
-#define mb()	__asm__ __volatile__ ("": : :"memory")
-#define rmb()	mb()
-#define wmb()	__asm__ __volatile__ ("": : :"memory")
+#define mb()		__asm__ __volatile__ ("": : :"memory")
+#define rmb()		mb()
+#define wmb()		__asm__ __volatile__ ("": : :"memory")
+#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #define read_barrier_depends()	do { } while(0)
 #endif
 
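As a usage sketch (again, not part of the patch): a TLB flush is the classic
MMUCR case the comment mentions -- set the invalidate bit, then issue the
barrier before relying on the flush having taken effect. MMUCR, MMUCR_TI,
and ctrl_inl()/ctrl_outl() are assumed here from asm/mmu_context.h and
asm/io.h:

	/* Hypothetical caller; register and accessor names assumed. */
	static inline void tlb_flush_all_sketch(void)
	{
		unsigned long status = ctrl_inl(MMUCR);

		status |= MMUCR_TI;	/* invalidate the entire TLB */
		ctrl_outl(status, MMUCR);
		ctrl_barrier();	/* 8 nops on legacy parts, icbi on sh4a */
	}
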
@@ -218,8 +243,8 @@ do { \
 #define back_to_P1()				\
 do {						\
 	unsigned long __dummy;			\
+	ctrl_barrier();				\
 	__asm__ __volatile__(			\
-		"nop;nop;nop;nop;nop;nop;nop\n\t" \
 		"mov.l	1f, %0\n\t"		\
 		"jmp	@%0\n\t"		\
 		" nop\n\t"			\