[S390] spinlock: check virtual cpu running status

This patch introduces a new function that checks the running status
of a cpu in a hypervisor. This status is not virtualized, so the check
is only correct when running in an LPAR. On acquiring a spinlock, if the
cpu holding the lock is scheduled by the hypervisor, we busy-wait on the
lock. If it is not scheduled, we yield to that cpu instead.
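
In outline, the new acquire path reads as follows. This is a condensed
sketch of the spinlock.c hunks below, not the literal patch; try_once()
is a hypothetical stand-in for the inline compare-and-swap on
lp->owner_cpu:

	while (1) {
		owner = lp->owner_cpu;
		if (!owner || smp_vcpu_scheduled(~owner)) {
			/* Holder looks runnable: busy-wait for a while. */
			for (count = spin_retry; count > 0; count--)
				if (try_once(lp, cpu))
					return;
			/* In an LPAR the status is reliable: keep spinning. */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* Holder preempted (or status untrustworthy): yield to it. */
		owner = lp->owner_cpu;
		if (owner)
			_raw_yield_cpu(~owner);
		if (try_once(lp, cpu))
			return;
	}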

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit 59b6978745
Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>

 arch/s390/include/asm/sigp.h | 40 +++++++++++++++++++---------------------
 arch/s390/include/asm/smp.h  | 24 ++++++++++++++++++++++++
 arch/s390/lib/spinlock.c     | 53 +++++++++++++++++++++++++++++++++++++----------------
 3 files changed, 80 insertions(+), 37 deletions(-)
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -25,29 +25,28 @@ static inline int cpu_logical_map(int cpu)
 }
 
 enum {
-	sigp_unassigned=0x0,
-	sigp_sense,
-	sigp_external_call,
-	sigp_emergency_signal,
-	sigp_start,
-	sigp_stop,
-	sigp_restart,
-	sigp_unassigned1,
-	sigp_unassigned2,
-	sigp_stop_and_store_status,
-	sigp_unassigned3,
-	sigp_initial_cpu_reset,
-	sigp_cpu_reset,
-	sigp_set_prefix,
-	sigp_store_status_at_address,
-	sigp_store_extended_status_at_address
+	sigp_sense = 1,
+	sigp_external_call = 2,
+	sigp_emergency_signal = 3,
+	sigp_start = 4,
+	sigp_stop = 5,
+	sigp_restart = 6,
+	sigp_stop_and_store_status = 9,
+	sigp_initial_cpu_reset = 11,
+	sigp_cpu_reset = 12,
+	sigp_set_prefix = 13,
+	sigp_store_status_at_address = 14,
+	sigp_store_extended_status_at_address = 15,
+	sigp_set_architecture = 18,
+	sigp_conditional_emergency_signal = 19,
+	sigp_sense_running = 21,
 };
 
 enum {
-        sigp_order_code_accepted=0,
-	sigp_status_stored,
-	sigp_busy,
-	sigp_not_operational
+	sigp_order_code_accepted = 0,
+	sigp_status_stored = 1,
+	sigp_busy = 2,
+	sigp_not_operational = 3,
 };
 
 /*
@@ -57,7 +56,6 @@ enum {
 	ec_schedule = 0,
 	ec_call_function,
 	ec_call_function_single,
-	ec_bit_last
 };
 
 /*
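
Spelling out the architected order codes makes room for the newer,
non-contiguous orders (18, 19 and 21); sigp_sense_running is the one
this patch uses. Each SIGP order completes with one of the four
condition codes in the second enum, and callers dispatch on that return
value. A minimal sketch, assuming the sigp_ps() helper that the smp.h
hunk below calls (only its call shape is visible in this patch):

	u32 status;

	switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
	case sigp_order_code_accepted:	/* cc 0: accepted, no status */
		break;
	case sigp_status_stored:	/* cc 1: 'status' is now valid */
		break;
	case sigp_busy:			/* cc 2: try again later */
		break;
	case sigp_not_operational:	/* cc 3: no such cpu */
		break;
	}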

diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -36,6 +36,28 @@ extern void smp_switch_to_cpu(void (*)(void *), void *, unsigned long sp,
 			      int from, int to);
 extern void smp_restart_cpu(void);
 
+/*
+ * returns 1 if (virtual) cpu is scheduled
+ * returns 0 otherwise
+ */
+static inline int smp_vcpu_scheduled(int cpu)
+{
+	u32 status;
+
+	switch (sigp_ps(&status, 0, cpu, sigp_sense_running)) {
+	case sigp_status_stored:
+		/* Status bit 0x400 set: cpu is not running */
+		if (status & 0x400)
+			return 0;
+		break;
+	case sigp_not_operational:
+		return 0;
+	default:
+		break;
+	}
+	return 1;
+}
+
 #else /* CONFIG_SMP */
 
 static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
@@ -43,6 +65,8 @@ static inline void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
 	func(data);
 }
 
+#define smp_vcpu_scheduled(cpu)	(1)
+
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU
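
In the stored status word, bit 0x400 is the "not running" indication,
hence the inverted test in smp_vcpu_scheduled() above; on !CONFIG_SMP
there is only one cpu, which is by definition scheduled. A hedged usage
sketch (spin_while_scheduled() is hypothetical, not part of the patch):
polling code can combine the check with cpu_relax() so it only burns
cycles while the target vcpu is actually backed by a physical cpu:

	/* Hypothetical: poll cond() only while 'cpu' stays scheduled. */
	static inline int spin_while_scheduled(int cpu, int (*cond)(void *),
					       void *arg)
	{
		while (smp_vcpu_scheduled(cpu)) {
			if (cond(arg))
				return 1;
			cpu_relax();
		}
		return 0;	/* vcpu was preempted by the hypervisor */
	}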

diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -43,16 +43,24 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
+	unsigned int owner;
 
 	while (1) {
-		if (count-- <= 0) {
-			unsigned int owner = lp->owner_cpu;
-			if (owner != 0)
-				_raw_yield_cpu(~owner);
-			count = spin_retry;
+		owner = lp->owner_cpu;
+		if (!owner || smp_vcpu_scheduled(~owner)) {
+			for (count = spin_retry; count > 0; count--) {
+				if (arch_spin_is_locked(lp))
+					continue;
+				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
+							  cpu) == 0)
+					return;
+			}
+			if (MACHINE_IS_LPAR)
+				continue;
 		}
-		if (arch_spin_is_locked(lp))
-			continue;
+		owner = lp->owner_cpu;
+		if (owner)
+			_raw_yield_cpu(~owner);
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
@@ -63,17 +71,27 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
+	unsigned int owner;
 
 	local_irq_restore(flags);
 	while (1) {
-		if (count-- <= 0) {
-			unsigned int owner = lp->owner_cpu;
-			if (owner != 0)
-				_raw_yield_cpu(~owner);
-			count = spin_retry;
+		owner = lp->owner_cpu;
+		if (!owner || smp_vcpu_scheduled(~owner)) {
+			for (count = spin_retry; count > 0; count--) {
+				if (arch_spin_is_locked(lp))
+					continue;
+				local_irq_disable();
+				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
+							  cpu) == 0)
+					return;
+				local_irq_restore(flags);
+			}
+			if (MACHINE_IS_LPAR)
+				continue;
 		}
-		if (arch_spin_is_locked(lp))
-			continue;
+		owner = lp->owner_cpu;
+		if (owner)
+			_raw_yield_cpu(~owner);
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
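
The _flags variant follows the same shape but keeps interrupts enabled
while it spins, closing them only around the compare-and-swap attempt.
That discipline in isolation (a sketch of the pattern used twice in the
hunk above):

	local_irq_disable();
	if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
		return;			/* got the lock, irqs stay off */
	local_irq_restore(flags);	/* failed: reopen irqs, then retry */
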
@@ -100,8 +118,11 @@ EXPORT_SYMBOL(arch_spin_trylock_retry);
 void arch_spin_relax(arch_spinlock_t *lock)
 {
 	unsigned int cpu = lock->owner_cpu;
-	if (cpu != 0)
-		_raw_yield_cpu(~cpu);
+	if (cpu != 0) {
+		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+		    !smp_vcpu_scheduled(~cpu))
+			_raw_yield_cpu(~cpu);
+	}
 }
 EXPORT_SYMBOL(arch_spin_relax);
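
The relax path encodes the same policy in miniature: under z/VM or KVM
the sense running status is not virtualized and cannot be trusted, so
the cpu always yields; in an LPAR it yields only when the lock holder is
genuinely not running. Restated as a hypothetical predicate (not part of
the patch; owner values are one's-complemented cpu numbers, as above):

	/* Hypothetical: is donating our timeslice to the holder useful? */
	static inline int should_yield_to(unsigned int owner_cpu)
	{
		if (!owner_cpu)
			return 0;	/* lock is free */
		if (MACHINE_IS_VM || MACHINE_IS_KVM)
			return 1;	/* status unreliable: always yield */
		/* LPAR: yield only if the owner's vcpu is not running */
		return !smp_vcpu_scheduled(~owner_cpu);
	}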