Explorar o código

xen64: register callbacks in arch-independent way

Use callback_op hypercall to register callbacks in a 32/64-bit
independent way (64-bit doesn't need a code segment, but that detail
is hidden in XEN_CALLBACK).

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Jeremy Fitzhardinge hai 17 anos
pai
achega
88459d4c7e
Modificáronse 2 ficheiros con 29 adicións e 10 borrados
  1. +17 −10
      arch/x86/xen/setup.c
  2. +12 −0
      include/asm-x86/xen/hypercall.h

+ 17 - 10
arch/x86/xen/setup.c

@@ -91,19 +91,25 @@ static void __init fiddle_vdso(void)
 	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
 }
 
-void xen_enable_sysenter(void)
+static __cpuinit int register_callback(unsigned type, const void *func)
 {
-	int cpu = smp_processor_id();
-	extern void xen_sysenter_target(void);
-	/* Mask events on entry, even though they get enabled immediately */
-	static struct callback_register sysenter = {
-		.type = CALLBACKTYPE_sysenter,
-		.address = XEN_CALLBACK(__KERNEL_CS, xen_sysenter_target),
+	struct callback_register callback = {
+		.type = type,
+		.address = XEN_CALLBACK(__KERNEL_CS, func),
 		.flags = CALLBACKF_mask_events,
 	};
 
+	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
+}
+
+void __cpuinit xen_enable_sysenter(void)
+{
+	int cpu = smp_processor_id();
+	extern void xen_sysenter_target(void);
+
 	if (!boot_cpu_has(X86_FEATURE_SEP) ||
-	    HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) != 0) {
+	    register_callback(CALLBACKTYPE_sysenter,
+			      xen_sysenter_target) != 0) {
 		clear_cpu_cap(&cpu_data(cpu), X86_FEATURE_SEP);
 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
 	}
@@ -120,8 +126,9 @@ void __init xen_arch_setup(void)
 	if (!xen_feature(XENFEAT_auto_translated_physmap))
 		HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);
 
-	HYPERVISOR_set_callbacks(__KERNEL_CS, (unsigned long)xen_hypervisor_callback,
-				 __KERNEL_CS, (unsigned long)xen_failsafe_callback);
+	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
+	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
+		BUG();
 
 	xen_enable_sysenter();
 

+ 12 - 0
include/asm-x86/xen/hypercall.h

@@ -226,6 +226,7 @@ HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
 	return _hypercall2(int, stack_switch, ss, esp);
 }
 
+#ifdef CONFIG_X86_32
 static inline int
 HYPERVISOR_set_callbacks(unsigned long event_selector,
 			 unsigned long event_address,
@@ -236,6 +237,17 @@ HYPERVISOR_set_callbacks(unsigned long event_selector,
 			   event_selector, event_address,
 			   failsafe_selector, failsafe_address);
 }
+#else  /* CONFIG_X86_64 */
+static inline int
+HYPERVISOR_set_callbacks(unsigned long event_address,
+			unsigned long failsafe_address,
+			unsigned long syscall_address)
+{
+	return _hypercall3(int, set_callbacks,
+			   event_address, failsafe_address,
+			   syscall_address);
+}
+#endif  /* CONFIG_X86_{32,64} */
 
 static inline int
 HYPERVISOR_callback_op(int cmd, void *arg)