@@ -16,6 +16,7 @@
 #include <linux/delay.h>
 #include <linux/elf.h>
 #include <linux/elfcore.h>
+#include <linux/module.h>
 
 #include <asm/processor.h>
 #include <asm/hardirq.h>
@@ -30,6 +31,27 @@
 
 int in_crash_kexec;
 
+/*
+ * This is used to VMCLEAR all VMCSs loaded on the
+ * processor. And when loading kvm_intel module, the
+ * callback function pointer will be assigned.
+ *
+ * protected by rcu.
+ */
+void (*crash_vmclear_loaded_vmcss)(void) = NULL;
+EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
+
+static inline void cpu_crash_vmclear_loaded_vmcss(void)
+{
+	void (*do_vmclear_operation)(void) = NULL;
+
+	rcu_read_lock();
+	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
+	if (do_vmclear_operation)
+		do_vmclear_operation();
+	rcu_read_unlock();
+}
+
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
 static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
@@ -46,6 +68,11 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 #endif
 	crash_save_cpu(regs, cpu);
 
+	/*
+	 * VMCLEAR VMCSs loaded on all cpus if needed.
+	 */
+	cpu_crash_vmclear_loaded_vmcss();
+
 	/* Disable VMX or SVM if needed.
 	 *
 	 * We need to disable virtualization on all CPUs.
@@ -88,6 +115,11 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 
 	kdump_nmi_shootdown_cpus();
 
+	/*
+	 * VMCLEAR VMCSs loaded on this cpu if needed.
+	 */
+	cpu_crash_vmclear_loaded_vmcss();
+
 	/* Booting kdump kernel with VMX or SVM enabled won't work,
 	 * because (among other limitations) we can't disable paging
 	 * with the virt flags.
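
The counterpart of this hook is the kvm_intel side, which publishes the callback when the module loads and retracts it before unloading. The sketch below is illustrative only and is not part of this patch; the names example_vmclear_local_loaded_vmcss, example_init and example_exit are hypothetical stand-ins for whatever the companion kvm_intel change actually uses.

/* Illustrative publisher side -- not part of this patch. */
#include <linux/module.h>
#include <linux/rcupdate.h>

/* Exported by arch/x86/kernel/crash.c above. */
extern void (*crash_vmclear_loaded_vmcss)(void);

/*
 * Hypothetical stand-in for the VMX code that would VMCLEAR every
 * VMCS on this cpu's loaded-VMCS list.
 */
static void example_vmclear_local_loaded_vmcss(void)
{
}

static int __init example_init(void)
{
	/* Publish the callback; pairs with the rcu_dereference() above. */
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   example_vmclear_local_loaded_vmcss);
	return 0;
}

static void __exit example_exit(void)
{
	/*
	 * Retract the callback, then wait for any reader inside an
	 * rcu_read_lock()/rcu_read_unlock() section to finish.
	 */
	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
	synchronize_rcu();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

In this pairing, the synchronize_rcu() on the teardown path is what makes the NULL check in cpu_crash_vmclear_loaded_vmcss() safe against module unload: once it returns, no CPU can still be running the old callback through a stale pointer.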