
[MIPS] SMTC: Fix recursion in instant IPI replay code.

local_irq_restore -> raw_local_irq_restore -> irq_restore_epilog ->
	smtc_ipi_replay -> smtc_ipi_dq -> spin_unlock_irqrestore ->
	_spin_unlock_irqrestore -> local_irq_restore

The recursion does abort when there are no more IPIs queued for a CPU, so
this isn't usually fatal, which is why we got away with it for so long
until it was discovered by code inspection.
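
The cycle and its bounded depth can be mocked up in user space. The sketch
below is an illustration only: the locking and IPI machinery are reduced to
counters, 0x0400 stands in for the interrupt-disable bit tested by
irq_restore_epilog, and none of this is the kernel code verbatim:

	#include <stdio.h>

	static int ipi_queued = 2;	/* pretend two IPIs were deferred */
	static int depth;		/* recursion depth, for demonstration */

	static void smtc_ipi_replay(void);

	/* raw_local_irq_restore(): replays IPIs when re-enabling interrupts */
	static void local_irq_restore(unsigned long flags)
	{
		if (!(flags & 0x0400))
			smtc_ipi_replay();
	}

	/* smtc_ipi_dq(): its unlock path re-enters local_irq_restore() */
	static int smtc_ipi_dq(void)
	{
		unsigned long flags = 0;	/* spin_lock_irqsave() */
		int p = ipi_queued > 0 ? ipi_queued-- : 0;

		local_irq_restore(flags);	/* spin_unlock_irqrestore() */
		return p;
	}

	static void smtc_ipi_replay(void)
	{
		printf("replay entered, depth %d\n", ++depth);
		if (ipi_queued > 0)		/* the base case that saves us */
			while (smtc_ipi_dq())
				;
		--depth;
	}

	int main(void)
	{
		local_irq_restore(0);	/* each queued IPI deepens the stack */
		return 0;
	}

Running it shows the depth growing with the number of queued IPIs and
bottoming out only once the queue drains, which is exactly why the bug was
survivable rather than an unconditional stack overflow.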

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Ralf Baechle, 18 years ago
Commit: 8a1e97ee2e
3 files changed, 69 insertions(+), 42 deletions(-)
  1. arch/mips/kernel/smtc.c (+33, -7)
  2. include/asm-mips/irqflags.h (+23, -32)
  3. include/asm-mips/smtc_ipi.h (+13, -3)

arch/mips/kernel/smtc.c (+33, -7)

@@ -999,10 +999,17 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
 
 /*
  * SMTC-specific hacks invoked from elsewhere in the kernel.
+ *
+ * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
+ * called with interrupts disabled.  We do rely on interrupts being disabled
+ * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
+ * result in a recursive call to raw_local_irq_restore().
  */
 
-void smtc_ipi_replay(void)
+static void __smtc_ipi_replay(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/*
 	 * To the extent that we've ever turned interrupts off,
 	 * we may have accumulated deferred IPIs.  This is subtle.
@@ -1017,17 +1024,30 @@ void smtc_ipi_replay(void)
 	 * is clear, and we'll handle it as a real pseudo-interrupt
 	 * and not a pseudo-pseudo interrupt.
 	 */
-	if (IPIQ[smp_processor_id()].depth > 0) {
-		struct smtc_ipi *pipi;
-		extern void self_ipi(struct smtc_ipi *);
+	if (IPIQ[cpu].depth > 0) {
+		while (1) {
+			struct smtc_ipi_q *q = &IPIQ[cpu];
+			struct smtc_ipi *pipi;
+			extern void self_ipi(struct smtc_ipi *);
+
+			spin_lock(&q->lock);
+			pipi = __smtc_ipi_dq(q);
+			spin_unlock(&q->lock);
+			if (!pipi)
+				break;
 
-		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
 			self_ipi(pipi);
-			smtc_cpu_stats[smp_processor_id()].selfipis++;
+			smtc_cpu_stats[cpu].selfipis++;
 		}
 	}
 }
 
+void smtc_ipi_replay(void)
+{
+	raw_local_irq_disable();
+	__smtc_ipi_replay();
+}
+
 EXPORT_SYMBOL(smtc_ipi_replay);
 
 void smtc_idle_loop_hook(void)
@@ -1132,7 +1152,13 @@ void smtc_idle_loop_hook(void)
 	 * is in use, there should never be any.
 	 */
 #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-	smtc_ipi_replay();
+	{
+		unsigned long flags;
+
+		local_irq_save(flags);
+		__smtc_ipi_replay();
+		local_irq_restore(flags);
+	}
 #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
 }
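
The __smtc_ipi_replay()/smtc_ipi_replay() split follows the usual kernel
naming convention: the double-underscore variant relies on the caller having
already established the needed context (here, interrupts disabled), while the
plain wrapper establishes it itself. Note that smtc_ipi_replay() only needs
raw_local_irq_disable(), not a save/restore pair, because its caller
raw_local_irq_restore() rewrites the interrupt state from flags immediately
afterwards anyway. A user-space sketch of the idiom, with stubbed primitives
(__replay(), replay() and irqs_off are illustrative names, not kernel APIs):

	#include <stdio.h>

	static int irqs_off;	/* stand-in for the CPU interrupt state */

	#define local_irq_save(f)	do { (f) = irqs_off; irqs_off = 1; } while (0)
	#define local_irq_restore(f)	do { irqs_off = (f); } while (0)

	/* __replay(): caller must have interrupts disabled already */
	static void __replay(void)
	{
		if (!irqs_off)
			fprintf(stderr, "BUG: __replay() with IRQs on\n");
		/* ... drain the per-CPU queue under a bare spin_lock() ... */
	}

	/* replay(): callable from any context; establishes the precondition */
	static void replay(void)
	{
		int flags;

		local_irq_save(flags);
		__replay();
		local_irq_restore(flags);
	}

	int main(void)
	{
		replay();
		printf("back in main, irqs_off=%d\n", irqs_off);
		return 0;
	}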
 
 

include/asm-mips/irqflags.h (+23, -32)

@@ -13,29 +13,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/compiler.h>
 #include <asm/hazards.h>
 
-/*
- * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred IPIs,
- * at the cost of branch and call overhead on each local_irq_restore()
- */
-
-#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-
-extern void smtc_ipi_replay(void);
-
-#define irq_restore_epilog(flags)				\
-do {								\
-	if (!(flags & 0x0400))					\
-		smtc_ipi_replay();				\
-} while (0)
-
-#else
-
-#define irq_restore_epilog(ignore) do { } while (0)
-
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
-
 __asm__ (
 	"	.macro	raw_local_irq_enable				\n"
 	"	.set	push						\n"
@@ -205,17 +185,28 @@ __asm__ (
 	"	.set	pop						\n"
 	"	.endm							\n");
 
-#define raw_local_irq_restore(flags)					\
-do {									\
-	unsigned long __tmp1;						\
-									\
-	__asm__ __volatile__(						\
-		"raw_local_irq_restore\t%0"				\
-		: "=r" (__tmp1)						\
-		: "0" (flags)						\
-		: "memory");						\
-	irq_restore_epilog(flags);					\
-} while(0)
+extern void smtc_ipi_replay(void);
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+	/*
+	 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+
+	__asm__ __volatile__(
+		"raw_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+}
 
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
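
Two details of the new inline are worth spelling out. The 0x0400 mask tests
bit 10 of the saved flags, which on SMTC builds is the TCStatus IXMT
("interrupt exempt") bit, so !(flags & 0x0400) means this restore is about to
re-enable interrupts and any deferred IPIs must be replayed first, while they
are still off; that ordering (replay before the asm writes flags back) is
what lets __smtc_ipi_replay() use a bare spin_lock(). And unlikely() is why
<linux/compiler.h> is now included: it annotates the replay branch as cold.
A standalone illustration of the test (GCC/Clang only, since it uses
__builtin_expect directly as linux/compiler.h does; the flags value is made
up for the example):

	#include <stdio.h>

	/* unlikely() as linux/compiler.h defines it for GCC */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	int main(void)
	{
		unsigned long flags = 0x0400;	/* IXMT set: IRQs were masked */

		/* the same test raw_local_irq_restore() performs */
		if (unlikely(!(flags & 0x0400)))
			puts("restoring an enabled state: replay deferred IPIs");
		else
			puts("restoring a masked state: nothing to replay");
		return 0;
	}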

include/asm-mips/smtc_ipi.h (+13, -3)

@@ -65,12 +65,10 @@ static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
-static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
+static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
 {
 	struct smtc_ipi *p;
-	long flags;
 
-	spin_lock_irqsave(&q->lock, flags);
 	if (q->head == NULL)
 		p = NULL;
 	else {
@@ -81,7 +79,19 @@ static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
 		if (q->head == NULL)
 			q->tail = NULL;
 	}
+
+	return p;
+}
+
+static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
+{
+	unsigned long flags;
+	struct smtc_ipi *p;
+
+	spin_lock_irqsave(&q->lock, flags);
+	p = __smtc_ipi_dq(q);
 	spin_unlock_irqrestore(&q->lock, flags);
+
 	return p;
 }
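
Splitting the dequeue this way gives __smtc_ipi_replay() a variant it can
call under its own bare spin_lock(), while the irqsave-wrapped smtc_ipi_dq()
remains for callers that run with interrupts enabled. The rewrite also
corrects the type of flags from long to unsigned long, which is what
spin_lock_irqsave() expects. A user-space mock of the list manipulation that
__smtc_ipi_dq() performs (simplified types, no locking; a sketch, not the
kernel structures):

	#include <stddef.h>
	#include <stdio.h>

	struct ipi {
		struct ipi *flink;	/* forward link, as in struct smtc_ipi */
	};

	struct ipi_q {
		struct ipi *head, *tail;
		int depth;
	};

	/* __dq(): caller already holds the queue lock */
	static struct ipi *__dq(struct ipi_q *q)
	{
		struct ipi *p = q->head;

		if (p != NULL) {
			q->head = p->flink;
			q->depth--;
			if (q->head == NULL)
				q->tail = NULL;	/* queue is now empty */
		}
		return p;
	}

	int main(void)
	{
		struct ipi a, b;
		struct ipi_q q = { &a, &b, 2 };

		a.flink = &b;
		b.flink = NULL;
		while (__dq(&q))
			printf("dequeued, depth now %d\n", q.depth);
		return 0;
	}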