
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  percpu: Remove the multi-page alignment facility
  x86-32: Allocate irq stacks seperate from percpu area
  x86-32, mm: Remove duplicated #include
  x86, printk: Get rid of <0> from stack output
  x86, kexec: Make sure to stop all CPUs before exiting the kernel
  x86/vsmp: Eliminate kconfig dependency warning
Linus Torvalds, 14 years ago
Parent
Current commit
0671b7674f

+ 1 - 0
arch/x86/Kconfig

@@ -347,6 +347,7 @@ endif
 
 config X86_VSMP
 	bool "ScaleMP vSMP"
+	select PARAVIRT_GUEST
 	select PARAVIRT
 	depends on X86_64 && PCI
 	depends on X86_EXTENDED_PLATFORM

+ 0 - 2
arch/x86/include/asm/irq.h

@@ -21,10 +21,8 @@ static inline int irq_canonicalize(int irq)
 
 #ifdef CONFIG_X86_32
 extern void irq_ctx_init(int cpu);
-extern void irq_ctx_exit(int cpu);
 #else
 # define irq_ctx_init(cpu) do { } while (0)
-# define irq_ctx_exit(cpu) do { } while (0)
 #endif
 
 #define __ARCH_HAS_DO_SOFTIRQ

+ 7 - 2
arch/x86/include/asm/smp.h

@@ -50,7 +50,7 @@ struct smp_ops {
 	void (*smp_prepare_cpus)(unsigned max_cpus);
 	void (*smp_cpus_done)(unsigned max_cpus);
 
-	void (*smp_send_stop)(void);
+	void (*stop_other_cpus)(int wait);
 	void (*smp_send_reschedule)(int cpu);
 
 	int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
 {
-	smp_ops.smp_send_stop();
+	smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+	smp_ops.stop_other_cpus(1);
 }
 
 static inline void smp_prepare_boot_cpu(void)
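
The smp.h change above is the core of the kexec fix: the smp_ops hook gains a wait argument, smp_send_stop() keeps its old fire-and-forget behaviour, and the new stop_other_cpus() waits until the other CPUs have actually stopped. A minimal sketch of how a shutdown path uses the new wrapper (the function below is hypothetical, for illustration only; only smp_send_stop() and stop_other_cpus() come from this diff):

#include <asm/smp.h>

static void example_machine_shutdown(void)
{
	/*
	 * Wait until the other CPUs have really stopped before touching
	 * hardware or jumping into a new kernel image.  A plain
	 * smp_send_stop() would return as soon as the stop IPI had been
	 * sent, i.e. smp_ops.stop_other_cpus(0) instead of (1).
	 */
	stop_other_cpus();
}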

+ 3 - 3
arch/x86/kernel/dumpstack_32.c

@@ -82,11 +82,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		if (kstack_end(stack))
 			break;
 		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-			printk("\n%s", log_lvl);
-		printk(" %08lx", *stack++);
+			printk(KERN_CONT "\n");
+		printk(KERN_CONT " %08lx", *stack++);
 		touch_nmi_watchdog();
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
 

+ 4 - 4
arch/x86/kernel/dumpstack_64.c

@@ -265,20 +265,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		if (stack >= irq_stack && stack <= irq_stack_end) {
 			if (stack == irq_stack_end) {
 				stack = (unsigned long *) (irq_stack_end[-1]);
-				printk(" <EOI> ");
+				printk(KERN_CONT " <EOI> ");
 			}
 		} else {
 		if (((long) stack & (THREAD_SIZE-1)) == 0)
 			break;
 		}
 		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
-			printk("\n%s", log_lvl);
-		printk(" %016lx", *stack++);
+			printk(KERN_CONT "\n");
+		printk(KERN_CONT " %016lx", *stack++);
 		touch_nmi_watchdog();
 	}
 	preempt_enable();
 
-	printk("\n");
+	printk(KERN_CONT "\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
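
In both dumpstack files the fix is the same: the continuation printks used to re-emit the log-level string (for example "<0>" for KERN_EMERG) as literal text at every line break, which is where the stray <0> in stack dumps came from. KERN_CONT marks a message as a continuation of the previous line, so no level prefix is inserted. A self-contained sketch of the pattern (the helper name is made up for illustration):

#include <linux/kernel.h>

static void example_dump_words(const unsigned long *stack, int n)
{
	int i;

	printk(KERN_DEBUG "Stack:");
	for (i = 0; i < n; i++)
		printk(KERN_CONT " %08lx", stack[i]);	/* same line, no prefix */
	printk(KERN_CONT "\n");
}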
 
 

+ 2 - 10
arch/x86/kernel/irq_32.c

@@ -60,9 +60,6 @@ union irq_ctx {
 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
 
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, hardirq_stack, THREAD_SIZE);
-static DEFINE_PER_CPU_MULTIPAGE_ALIGNED(union irq_ctx, softirq_stack, THREAD_SIZE);
-
 static void call_on_stack(void *func, void *stack)
 {
 	asm volatile("xchgl	%%ebx,%%esp	\n"
@@ -128,7 +125,7 @@ void __cpuinit irq_ctx_init(int cpu)
 	if (per_cpu(hardirq_ctx, cpu))
 		return;
 
-	irqctx = &per_cpu(hardirq_stack, cpu);
+	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
@@ -137,7 +134,7 @@ void __cpuinit irq_ctx_init(int cpu)
 
 	per_cpu(hardirq_ctx, cpu) = irqctx;
 
-	irqctx = &per_cpu(softirq_stack, cpu);
+	irqctx = (union irq_ctx *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
 	irqctx->tinfo.task		= NULL;
 	irqctx->tinfo.exec_domain	= NULL;
 	irqctx->tinfo.cpu		= cpu;
@@ -150,11 +147,6 @@ void __cpuinit irq_ctx_init(int cpu)
 	       cpu, per_cpu(hardirq_ctx, cpu),  per_cpu(softirq_ctx, cpu));
 }
 
-void irq_ctx_exit(int cpu)
-{
-	per_cpu(hardirq_ctx, cpu) = NULL;
-}
-
 asmlinkage void do_softirq(void)
 {
 	unsigned long flags;
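
The 32-bit irq stacks are THREAD_SIZE, i.e. multiple pages on the usual 8k-stack configuration, so keeping them as per-CPU variables forced a multi-page alignment on the per-CPU area. Allocating them straight from the page allocator removes that requirement, which is what lets the DEFINE_PER_CPU_MULTIPAGE_ALIGNED machinery go away further down. A sketch of the allocation, assuming the existing THREAD_FLAGS/THREAD_ORDER definitions the kernel already uses for task stacks (the wrapper itself is hypothetical):

#include <linux/gfp.h>
#include <asm/thread_info.h>	/* assumed to provide THREAD_FLAGS/THREAD_ORDER */

static void *example_alloc_irq_stack(void)
{
	/* 2^THREAD_ORDER contiguous, naturally aligned pages */
	return (void *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER);
}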

+ 1 - 1
arch/x86/kernel/reboot.c

@@ -635,7 +635,7 @@ void native_machine_shutdown(void)
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
 	 */
-	smp_send_stop();
+	stop_other_cpus();
#endif
 
 	lapic_shutdown();

+ 9 - 6
arch/x86/kernel/smp.c

@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
 	irq_exit();
 }
 
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
-	unsigned long wait;
+	unsigned long timeout;
 
 	if (reboot_force)
 		return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
 	if (num_online_cpus() > 1) {
 		apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-		/* Don't wait longer than a second */
-		wait = USEC_PER_SEC;
-		while (num_online_cpus() > 1 && wait--)
+		/*
+		 * Don't wait longer than a second if the caller
+		 * didn't ask us to wait.
+		 */
+		timeout = USEC_PER_SEC;
+		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
 	.smp_prepare_cpus	= native_smp_prepare_cpus,
 	.smp_cpus_done		= native_smp_cpus_done,
 
-	.smp_send_stop		= native_smp_send_stop,
+	.stop_other_cpus	= native_stop_other_cpus,
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
 	.cpu_up			= native_cpu_up,

+ 0 - 1
arch/x86/kernel/smpboot.c

@@ -1373,7 +1373,6 @@ void play_dead_common(void)
 {
 	idle_task_exit();
 	reset_lazy_tlbstate();
-	irq_ctx_exit(raw_smp_processor_id());
 	c1e_remove_cpu(raw_smp_processor_id());
 
 	mb();

+ 1 - 1
arch/x86/xen/enlighten.c

@@ -1016,7 +1016,7 @@ static void xen_reboot(int reason)
 	struct sched_shutdown r = { .reason = reason };
 
 #ifdef CONFIG_SMP
-	smp_send_stop();
+	stop_other_cpus();
 #endif
 
 	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))

+ 3 - 3
arch/x86/xen/smp.c

@@ -400,9 +400,9 @@ static void stop_self(void *v)
 	BUG();
 }
 
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
 {
-	smp_call_function(stop_self, NULL, 0);
+	smp_call_function(stop_self, NULL, wait);
 }
 
 static void xen_smp_send_reschedule(int cpu)
@@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
 	.cpu_disable = xen_cpu_disable,
 	.play_dead = xen_play_dead,
 
-	.smp_send_stop = xen_smp_send_stop,
+	.stop_other_cpus = xen_stop_other_cpus,
 	.smp_send_reschedule = xen_smp_send_reschedule,
 
 	.send_call_func_ipi = xen_smp_send_call_function_ipi,

+ 0 - 12
include/linux/percpu-defs.h

@@ -147,18 +147,6 @@
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
 	DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
 
-/*
- * Declaration/definition used for large per-CPU variables that must be
- * aligned to something larger than the pagesize.
- */
-#define DECLARE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size)		\
-	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
-	__aligned(size)
-
-#define DEFINE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size)		\
-	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
-	__aligned(size)
-
 /*
  * Intermodule exports for per-CPU variables.  sparse forgets about
  * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
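
With the irq stacks allocated outside the per-CPU area, the multi-page variant above loses its only user, which is why it can be dropped; ordinary page alignment remains available. A sketch of the surviving interface (the buffer type below is made up for illustration):

#include <linux/percpu.h>
#include <asm/page.h>

struct example_buf {
	unsigned char data[PAGE_SIZE];
};

/* one page-aligned instance per CPU; nothing larger is needed any more */
static DEFINE_PER_CPU_PAGE_ALIGNED(struct example_buf, example_buf);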