MIPS: Get rid of the use of .macro in C code.

It fails with LTO and probably has always been fragile.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Ralf Baechle authored 12 years ago
commit 02b849f761
3 changed files with 403 additions and 270 deletions:
  1. arch/mips/include/asm/hazards.h (+243, -128)
  2. arch/mips/include/asm/irqflags.h (+76, -77)
  3. arch/mips/lib/mips-atomic.c (+84, -65)
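
For context, here is the construct being removed and its replacement, reduced to a minimal standalone sketch (assembled from the hunks below for illustration; this is not code added by the commit, and __stringify()'s definition from <linux/stringify.h> is reproduced so the sketch stands on its own):

/*
 * Old, fragile pattern: a gas-level .macro is defined by one top-level
 * asm() and invoked from another.  The compiler may reorder or
 * duplicate top-level asm statements, and LTO regroups translation
 * units into one assembly stream, so the .macro can be redefined (one
 * definition per translation unit that included the header) or missing
 * at the point of use.
 */
#define ASMMACRO(name, code...)						\
__asm__(".macro " #name "; " #code "; .endm");				\
									\
static inline void name(void)						\
{									\
	__asm__ __volatile__(#name);					\
}

/*
 * New pattern: the instruction text lives in a C preprocessor macro
 * and is stringified into every asm() that needs it, so each use is
 * self-contained.
 */
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define ___ssnop	sll	$0, $0, 1

#define _ssnop()							\
do {									\
	__asm__ __volatile__(__stringify(___ssnop));			\
} while (0)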

arch/mips/include/asm/hazards.h (+243, -128)

@@ -10,34 +10,13 @@
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
 
-#ifdef __ASSEMBLY__
-#define ASMMACRO(name, code...) .macro name; code; .endm
-#else
-
-#include <asm/cpu-features.h>
-
-#define ASMMACRO(name, code...)						\
-__asm__(".macro " #name "; " #code "; .endm");				\
-									\
-static inline void name(void)						\
-{									\
-	__asm__ __volatile__ (#name);					\
-}
-
-/*
- * MIPS R2 instruction hazard barrier.	 Needs to be called as a subroutine.
- */
-extern void mips_ihb(void);
-
-#endif
+#include <linux/stringify.h>
 
-ASMMACRO(_ssnop,
-	 sll	$0, $0, 1
-	)
+#define ___ssnop							\
+	sll	$0, $0, 1
 
-ASMMACRO(_ehb,
-	 sll	$0, $0, 3
-	)
+#define ___ehb								\
+	sll	$0, $0, 3
 
 /*
  * TLB hazards
@@ -48,24 +27,24 @@ ASMMACRO(_ehb,
  * MIPSR2 defines ehb for hazard avoidance
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	 _ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	 _ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	 _ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	_ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ehb
-	)
+#define __mtc0_tlbw_hazard						\
+	___ehb
+
+#define __tlbw_use_hazard						\
+	___ehb
+
+#define __tlb_probe_hazard						\
+	___ehb
+
+#define __irq_enable_hazard						\
+	___ehb
+
+#define __irq_disable_hazard						\
+	___ehb
+
+#define __back_to_back_c0_hazard					\
+	___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler.	Gas otoh has the
@@ -94,24 +73,42 @@ do {									\
  * These are slightly complicated by the fact that we guarantee R1 kernels to
  * run fine on R2 processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	_ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	_ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	_ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
+
+#define __mtc0_tlbw_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __tlbw_use_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __tlb_probe_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __irq_enable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __irq_disable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
+#define __back_to_back_c0_hazard					\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop;							\
+	___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler.	Gas otoh has the
@@ -147,18 +144,18 @@ do {									\
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #elif defined(CONFIG_CPU_SB1)
@@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard,
 /*
  * Mostly like R4000 for historic reasons
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	 _ssnop; _ssnop; _ssnop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #else
@@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard,
  * hazard so this is nice trick to have an optimal code for a range of
  * processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	nop; nop
-	)
-ASMMACRO(tlbw_use_hazard,
-	nop; nop; nop
-	)
-ASMMACRO(tlb_probe_hazard,
-	 nop; nop; nop
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ssnop; _ssnop; _ssnop;
-	)
-ASMMACRO(irq_disable_hazard,
-	nop; nop; nop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ssnop; _ssnop; _ssnop;
-	)
+#define __mtc0_tlbw_hazard						\
+	nop;								\
+	nop
+
+#define __tlbw_use_hazard						\
+	nop;								\
+	nop;								\
+	nop
+
+#define __tlb_probe_hazard						\
+	nop;								\
+	nop;								\
+	nop
+
+#define __irq_enable_hazard						\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop
+
+#define __irq_disable_hazard						\
+	nop;								\
+	nop;								\
+	nop
+
+#define __back_to_back_c0_hazard					\
+	___ssnop;							\
+	___ssnop;							\
+	___ssnop
+
 #define instruction_hazard() do { } while (0)
 
 #endif
@@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard,
 /* FPU hazards */
 
 #if defined(CONFIG_CPU_SB1)
-ASMMACRO(enable_fpu_hazard,
-	 .set	push;
-	 .set	mips64;
-	 .set	noreorder;
-	 _ssnop;
-	 bnezl	$0, .+4;
-	 _ssnop;
-	 .set	pop
-)
-ASMMACRO(disable_fpu_hazard,
-)
+
+#define __enable_fpu_hazard						\
+	.set	push;							\
+	.set	mips64;							\
+	.set	noreorder;						\
+	___ssnop;							\
+	bnezl	$0, .+4;						\
+	___ssnop;							\
+	.set	pop
+
+#define __disable_fpu_hazard
 
 #elif defined(CONFIG_CPU_MIPSR2)
-ASMMACRO(enable_fpu_hazard,
-	 _ehb
-)
-ASMMACRO(disable_fpu_hazard,
-	 _ehb
-)
+
+#define __enable_fpu_hazard						\
+	___ehb
+
+#define __disable_fpu_hazard						\
+	___ehb
+
 #else
-ASMMACRO(enable_fpu_hazard,
-	 nop; nop; nop; nop
-)
-ASMMACRO(disable_fpu_hazard,
-	 _ehb
-)
+
+#define __enable_fpu_hazard						\
+	nop;								\
+	nop;								\
+	nop;								\
+	nop
+
+#define __disable_fpu_hazard						\
+	___ehb
+
 #endif
 
+#ifdef __ASSEMBLY__
+
+#define _ssnop ___ssnop
+#define	_ehb ___ehb
+#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
+#define tlbw_use_hazard __tlbw_use_hazard
+#define tlb_probe_hazard __tlb_probe_hazard
+#define irq_enable_hazard __irq_enable_hazard
+#define irq_disable_hazard __irq_disable_hazard
+#define back_to_back_c0_hazard __back_to_back_c0_hazard
+#define enable_fpu_hazard __enable_fpu_hazard
+#define disable_fpu_hazard __disable_fpu_hazard
+
+#else
+
+#define _ssnop()							\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(___ssnop)						\
+	);								\
+} while (0)
+
+#define	_ehb()								\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(___ehb)						\
+	);								\
+} while (0)
+
+
+#define mtc0_tlbw_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__mtc0_tlbw_hazard)					\
+	);								\
+} while (0)
+
+
+#define tlbw_use_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__tlbw_use_hazard)					\
+	);								\
+} while (0)
+
+
+#define tlb_probe_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__tlb_probe_hazard)					\
+	);								\
+} while (0)
+
+
+#define irq_enable_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__irq_enable_hazard)				\
+	);								\
+} while (0)
+
+
+#define irq_disable_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__irq_disable_hazard)				\
+	);								\
+} while (0)
+
+
+#define back_to_back_c0_hazard() 					\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__back_to_back_c0_hazard)				\
+	);								\
+} while (0)
+
+
+#define enable_fpu_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__enable_fpu_hazard)				\
+	);								\
+} while (0)
+
+
+#define disable_fpu_hazard()						\
+do {									\
+	__asm__ __volatile__(						\
+	__stringify(__disable_fpu_hazard)				\
+	);								\
+} while (0)
+
+/*
+ * MIPS R2 instruction hazard barrier.   Needs to be called as a subroutine.
+ */
+extern void mips_ihb(void);
+
+#endif /* __ASSEMBLY__  */
+
 #endif /* _ASM_HAZARDS_H */

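To see why the rewritten hazards.h is LTO-safe, trace one expansion for a CONFIG_CPU_MIPSR2 build (an illustration derived from the hunks above, not code in the commit):

/*
 * mtc0_tlbw_hazard();
 *   => do { __asm__ __volatile__(__stringify(__mtc0_tlbw_hazard)); } while (0)
 *   => do { __asm__ __volatile__("sll $0, $0, 3"); } while (0)	/* ___ehb */
 *
 * Everything is resolved by the C preprocessor before the assembler
 * runs, so no .macro state has to survive between top-level asm()
 * statements, however LTO merges or duplicates translation units.
 */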
arch/mips/include/asm/irqflags.h (+76, -77)

@@ -14,53 +14,48 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <asm/hazards.h>
 
 #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-__asm__(
-	"	.macro	arch_local_irq_disable\n"
+static inline void arch_local_irq_disable(void)
+{
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noat						\n"
 	"	di							\n"
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
-
-static inline void arch_local_irq_disable(void)
-{
-	__asm__ __volatile__(
-		"arch_local_irq_disable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 }
 
+static inline unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
 
-__asm__(
-	"	.macro	arch_local_irq_save result			\n"
+	asm __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
-	"	di	\\result					\n"
-	"	andi	\\result, 1					\n"
-	"	irq_disable_hazard					\n"
+	"	di	%[flags]					\n"
+	"	andi	%[flags], 1					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
 
-static inline unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_irq_save\t%0"
-		     : "=r" (flags)
-		     : /* no inputs */
-		     : "memory");
 	return flags;
 }
 
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
 
-__asm__(
-	"	.macro	arch_local_irq_restore flags			\n"
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
@@ -69,7 +64,7 @@ __asm__(
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
-	"	beqz	\\flags, 1f					\n"
+	"	beqz	%[flags], 1f					\n"
 	"	di							\n"
 	"	ei							\n"
 	"1:								\n"
@@ -78,33 +73,44 @@ __asm__(
 	 * Fast, dangerous.  Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12						\n"
-	"	ins	$1, \\flags, 0, 1				\n"
+	"	ins	$1, %[flags], 0, 1				\n"
 	"	mtc0	$1, $12						\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-	unsigned long __tmp1;
-
-	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
 }
 
 static inline void __arch_local_irq_restore(unsigned long flags)
 {
-	unsigned long __tmp1;
-
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#if defined(CONFIG_IRQ_CPU)
+	/*
+	 * Slow, but doesn't suffer from a relatively unlikely race
+	 * condition we're having since days 1.
+	 */
+	"	beqz	%[flags], 1f					\n"
+	"	di							\n"
+	"	ei							\n"
+	"1:								\n"
+#else
+	/*
+	 * Fast, dangerous.  Life is fun, life is good.
+	 */
+	"	mfc0	$1, $12						\n"
+	"	ins	$1, %[flags], 0, 1				\n"
+	"	mtc0	$1, $12						\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (flags)
+	: "0" (flags)
+	: "memory");
 }
 #else
 /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
@@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags);
 #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
 
 
-__asm__(
-	"	.macro	arch_local_irq_enable				\n"
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
@@ -133,45 +149,28 @@ __asm__(
 	"	xori	$1,0x1e						\n"
 	"	mtc0	$1,$12						\n"
 #endif
-	"	irq_enable_hazard					\n"
+	"	" __stringify(__irq_enable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm");
-
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 }
 
+static inline unsigned long arch_local_save_flags(void)
+{
+	unsigned long flags;
 
-__asm__(
-	"	.macro	arch_local_save_flags flags			\n"
+	asm __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 #ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1					\n"
+	"	mfc0	%[flags], $2, 1					\n"
 #else
-	"	mfc0	\\flags, $12					\n"
+	"	mfc0	%[flags], $12					\n"
 #endif
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: [flags] "=r" (flags));
 
-static inline unsigned long arch_local_save_flags(void)
-{
-	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
 	return flags;
 }
 

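Note how the .macro parameters (\result, \flags) are replaced by named inline-asm operands such as %[flags]. A minimal standalone sketch of that style, modeled on arch_local_save_flags() above (the helper name is hypothetical, not part of the commit):

static inline unsigned long read_status_sketch(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	mfc0	%[flags], $12					\n"	/* CP0 Status */
	"	.set	pop						\n"
	: [flags] "=r" (flags));	/* [flags] binds the C variable directly */

	return flags;
}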
arch/mips/lib/mips-atomic.c (+84, -65)

@@ -13,6 +13,7 @@
 #include <linux/compiler.h>
 #include <linux/preempt.h>
 #include <linux/export.h>
+#include <linux/stringify.h>
 
 #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
 
@@ -34,8 +35,11 @@
  *
  * Workaround: mask EXL bit of the result or place a nop before mfc0.
  */
-__asm__(
-	"	.macro	arch_local_irq_disable\n"
+notrace void arch_local_irq_disable(void)
+{
+	preempt_disable();
+
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noat						\n"
 #ifdef CONFIG_MIPS_MT_SMTC
@@ -52,108 +56,98 @@ __asm__(
 	"	.set	noreorder					\n"
 	"	mtc0	$1,$12						\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 
-notrace void arch_local_irq_disable(void)
-{
-	preempt_disable();
-	__asm__ __volatile__(
-		"arch_local_irq_disable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
 	preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
 
-__asm__(
-	"	.macro	arch_local_irq_save result			\n"
+notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+
+	preempt_disable();
+
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	reorder						\n"
 	"	.set	noat						\n"
 #ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1					\n"
-	"	ori	$1, \\result, 0x400				\n"
+	"	mfc0	%[flags], $2, 1				\n"
+	"	ori	$1, %[flags], 0x400				\n"
 	"	.set	noreorder					\n"
 	"	mtc0	$1, $2, 1					\n"
-	"	andi	\\result, \\result, 0x400			\n"
+	"	andi	%[flags], %[flags], 0x400			\n"
 #elif defined(CONFIG_CPU_MIPSR2)
 	/* see irqflags.h for inline function */
 #else
-	"	mfc0	\\result, $12					\n"
-	"	ori	$1, \\result, 0x1f				\n"
+	"	mfc0	%[flags], $12					\n"
+	"	ori	$1, %[flags], 0x1f				\n"
 	"	xori	$1, 0x1f					\n"
 	"	.set	noreorder					\n"
 	"	mtc0	$1, $12						\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
 
-notrace unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	preempt_disable();
-	asm volatile("arch_local_irq_save\t%0"
-		     : "=r" (flags)
-		     : /* no inputs */
-		     : "memory");
 	preempt_enable();
+
 	return flags;
 }
 EXPORT_SYMBOL(arch_local_irq_save);
 
+notrace void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+	preempt_disable();
 
-__asm__(
-	"	.macro	arch_local_irq_restore flags			\n"
+	__asm__ __volatile__(
 	"	.set	push						\n"
 	"	.set	noreorder					\n"
 	"	.set	noat						\n"
 #ifdef CONFIG_MIPS_MT_SMTC
-	"mfc0	$1, $2, 1						\n"
-	"andi	\\flags, 0x400						\n"
-	"ori	$1, 0x400						\n"
-	"xori	$1, 0x400						\n"
-	"or	\\flags, $1						\n"
-	"mtc0	\\flags, $2, 1						\n"
+	"	mfc0	$1, $2, 1					\n"
+	"	andi	%[flags], 0x400					\n"
+	"	ori	$1, 0x400					\n"
+	"	xori	$1, 0x400					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $2, 1					\n"
 #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
 	/* see irqflags.h for inline function */
 #elif defined(CONFIG_CPU_MIPSR2)
 	/* see irqflags.h for inline function */
 #else
 	"	mfc0	$1, $12						\n"
-	"	andi	\\flags, 1					\n"
+	"	andi	%[flags], 1					\n"
 	"	ori	$1, 0x1f					\n"
 	"	xori	$1, 0x1f					\n"
-	"	or	\\flags, $1					\n"
-	"	mtc0	\\flags, $12					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $12					\n"
 #endif
-	"	irq_disable_hazard					\n"
+	"	" __stringify(__irq_disable_hazard) "			\n"
 	"	.set	pop						\n"
-	"	.endm							\n");
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
 
-notrace void arch_local_irq_restore(unsigned long flags)
-{
-	unsigned long __tmp1;
-
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of branch and call overhead on each
-	 * local_irq_restore()
-	 */
-	if (unlikely(!(flags & 0x0400)))
-		smtc_ipi_replay();
-#endif
-	preempt_disable();
-	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
 	preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
@@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags)
 	unsigned long __tmp1;
 
 	preempt_disable();
+
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push						\n"
+	"	.set	noreorder					\n"
+	"	.set	noat						\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1					\n"
+	"	andi	%[flags], 0x400					\n"
+	"	ori	$1, 0x400					\n"
+	"	xori	$1, 0x400					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $2, 1					\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12						\n"
+	"	andi	%[flags], 1					\n"
+	"	ori	$1, 0x1f					\n"
+	"	xori	$1, 0x1f					\n"
+	"	or	%[flags], $1					\n"
+	"	mtc0	%[flags], $12					\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "			\n"
+	"	.set	pop						\n"
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
+
 	preempt_enable();
 }
 EXPORT_SYMBOL(__arch_local_irq_restore);
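
These out-of-line helpers are not called directly; generic code reaches them through the <linux/irqflags.h> wrappers. A usage sketch (hypothetical caller, not part of the commit):

#include <linux/irqflags.h>

static void frob_percpu_state(void)	/* hypothetical function */
{
	unsigned long flags;

	local_irq_save(flags);		/* resolves to arch_local_irq_save() */
	/* ... critical section: no interrupts on this CPU ... */
	local_irq_restore(flags);	/* resolves to arch_local_irq_restore() */
}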