Selaa lähdekoodia

kvmclock: Add functions to check if the host has stopped the vm

When a host stops or suspends a VM it will set a flag to show this.  The
watchdog will use these functions to determine if a softlockup is real, or the
result of a suspended VM.

Signed-off-by: Eric B Munson <emunson@mgebm.net>
asm-generic changes Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Eric B Munson 13 vuotta sitten
vanhempi
commit
3b5d56b931

+ 1 - 0
arch/alpha/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/arm/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/avr32/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/blackfin/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/c6x/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/frv/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/h8300/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/hexagon/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 5 - 0
arch/ia64/include/asm/kvm_para.h

@@ -26,6 +26,11 @@ static inline unsigned int kvm_arch_para_features(void)
 	return 0;
 }
 
+/* ia64 has no host "guest paused" signal, so always report not-paused. */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+	return false;
+}
+
 #endif
 
 #endif

+ 1 - 0
arch/m68k/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/microblaze/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/mips/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/mn10300/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/openrisc/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/parisc/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 5 - 0
arch/powerpc/include/asm/kvm_para.h

@@ -206,6 +206,11 @@ static inline unsigned int kvm_arch_para_features(void)
 	return r;
 }
 
+/* powerpc has no host "guest paused" signal, so always report not-paused. */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+	return false;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* __POWERPC_KVM_PARA_H__ */

+ 5 - 0
arch/s390/include/asm/kvm_para.h

@@ -149,6 +149,11 @@ static inline unsigned int kvm_arch_para_features(void)
 	return 0;
 }
 
+/* s390 has no host "guest paused" signal, so always report not-paused. */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+	return false;
+}
+
 #endif
 
 #endif /* __S390_KVM_PARA_H */

+ 1 - 0
arch/score/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/sh/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/sparc/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/tile/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/um/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 1 - 0
arch/unicore32/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 8 - 0
arch/x86/include/asm/kvm_para.h

@@ -95,6 +95,14 @@ struct kvm_vcpu_pv_apf_data {
 extern void kvmclock_init(void);
 extern int kvm_register_clock(char *txt);
 
+#ifdef CONFIG_KVM_CLOCK
+/* Defined in arch/x86/kernel/kvmclock.c; true if the host paused the VM. */
+bool kvm_check_and_clear_guest_paused(void);
+#else
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+	return false;
+}
+#endif /* CONFIG_KVM_CLOCK */
 
 /* This instruction is vmcall.  On non-VT architectures, it will generate a
  * trap that we will then rewrite to the appropriate instruction.

+ 21 - 0
arch/x86/kernel/kvmclock.c

@@ -22,6 +22,7 @@
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include <linux/percpu.h>
+#include <linux/hardirq.h>
 
 #include <asm/x86_init.h>
 #include <asm/reboot.h>
@@ -114,6 +115,26 @@ static void kvm_get_preset_lpj(void)
 	preset_lpj = lpj;
 }
 
+/*
+ * Check whether the host set PVCLOCK_GUEST_STOPPED in this CPU's pvclock
+ * area (i.e. the VM was stopped or suspended), clearing the flag if it was
+ * set.  Returns true iff the flag was set.  The watchdog uses this to tell
+ * a real softlockup from time lost while the VM was paused.
+ *
+ * NOTE(review): in_atomic() only reflects preemption state when preempt
+ * counting is configured — confirm this WARN_ON is meaningful on
+ * !CONFIG_PREEMPT kernels.
+ */
+bool kvm_check_and_clear_guest_paused(void)
+{
+	bool ret = false;
+	struct pvclock_vcpu_time_info *src;
+
+	/*
+	 * per_cpu() is safe here because this function is only called from
+	 * timer functions where preemption is already disabled.
+	 */
+	WARN_ON(!in_atomic());
+	src = &__get_cpu_var(hv_clock);
+	if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
+		/* Clear only the STOPPED bit; the host may set it again later. */
+		__this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
+		ret = true;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_check_and_clear_guest_paused);
+
 static struct clocksource kvm_clock = {
 	.name = "kvm-clock",
 	.read = kvm_clock_get_cycles,

+ 1 - 0
arch/xtensa/include/asm/kvm_para.h

@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>

+ 14 - 0
include/asm-generic/kvm_para.h

@@ -0,0 +1,14 @@
+#ifndef _ASM_GENERIC_KVM_PARA_H
+#define _ASM_GENERIC_KVM_PARA_H
+
+
+/*
+ * This function is used by architectures that support kvm to avoid issuing
+ * false soft lockup messages.
+ */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+	return false;
+}
+
+#endif