@@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
 	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
 	char *message = (char *)&info->messages;
 
+	/*
+	 * Order previous accesses before accesses in the IPI handler.
+	 */
+	smp_mb();
 	message[msg] = 1;
-	mb();
+	/*
+	 * cause_ipi functions are required to include a full barrier
+	 * before doing whatever causes the IPI.
+	 */
 	smp_ops->cause_ipi(cpu, info->data);
 }
 
@@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void)
 	mb();	/* order any irq clear */
 
 	do {
-		all = xchg_local(&info->messages, 0);
+		all = xchg(&info->messages, 0);
 #ifdef __BIG_ENDIAN
 		if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))