
Merge branch 'x86/urgent' into x86/cleanups

Ingo Molnar 16 years ago
parent
commit
cbe9ee00ce

+ 4 - 1
arch/x86/Kconfig

@@ -167,9 +167,12 @@ config GENERIC_PENDING_IRQ
 config X86_SMP
 	bool
 	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
-	select USE_GENERIC_SMP_HELPERS
 	default y
 
+config USE_GENERIC_SMP_HELPERS
+	def_bool y
+	depends on SMP
+
 config X86_32_SMP
 	def_bool y
 	depends on X86_32 && SMP

+ 0 - 0
include/asm-x86/iomap.h → arch/x86/include/asm/iomap.h


+ 21 - 4
arch/x86/kernel/ds.c

@@ -236,17 +236,33 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 	struct ds_context *context = *p_context;
 
 	if (!context) {
+		spin_unlock(&ds_lock);
+
 		context = kzalloc(sizeof(*context), GFP_KERNEL);
 
-		if (!context)
+		if (!context) {
+			spin_lock(&ds_lock);
 			return NULL;
+		}
 
 		context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
 		if (!context->ds) {
 			kfree(context);
+			spin_lock(&ds_lock);
 			return NULL;
 		}
 
+		spin_lock(&ds_lock);
+		/*
+		 * Check for race - another CPU could have allocated
+		 * it meanwhile:
+		 */
+		if (*p_context) {
+			kfree(context->ds);
+			kfree(context);
+			return *p_context;
+		}
+
 		*p_context = context;
 
 		context->this = p_context;
@@ -384,14 +400,15 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
 
 	spin_lock(&ds_lock);
 
-	if (!check_tracer(task))
-		return -EPERM;
-
 	error = -ENOMEM;
 	context = ds_alloc_context(task);
 	if (!context)
 		goto out_unlock;
 
+	error = -EPERM;
+	if (!check_tracer(task))
+		goto out_unlock;
+
 	error = -EALREADY;
 	if (context->owner[qual] == current)
 		goto out_unlock;
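A note on the ds.c hunk above: kzalloc(..., GFP_KERNEL) may sleep, so ds_alloc_context() now drops ds_lock around the allocations and re-takes it before touching *p_context again; because another CPU may have installed a context while the lock was dropped, the freshly allocated one is thrown away if *p_context is no longer NULL. Below is a minimal, self-contained sketch of that pattern; the names (struct foo, foo_lock, get_foo) are illustrative and not from the kernel source, and unlike ds_alloc_context() the sketch releases the lock itself before returning.

#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int payload;
};

static DEFINE_SPINLOCK(foo_lock);
static struct foo *shared_foo;		/* protected by foo_lock */

static struct foo *get_foo(void)
{
	struct foo *f;

	spin_lock(&foo_lock);
	f = shared_foo;
	if (!f) {
		/* GFP_KERNEL may sleep - never allocate under a spinlock */
		spin_unlock(&foo_lock);
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		spin_lock(&foo_lock);

		if (!f)
			goto out;		/* allocation failed */

		if (shared_foo) {
			/* lost the race: someone else installed one meanwhile */
			kfree(f);
			f = shared_foo;
		} else {
			shared_foo = f;
		}
	}
out:
	spin_unlock(&foo_lock);
	return f;
}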

+ 1 - 8
arch/x86/kernel/es7000_32.c

@@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
 	struct acpi_table_header *header = NULL;
 	int i = 0;
-	acpi_size tbl_size;
 
-	while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
+	while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
 		if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
 			struct oem_table *t = (struct oem_table *)header;
 
 			oem_addrX = t->OEMTableAddr;
 			oem_size = t->OEMTableSize;
-			early_acpi_os_unmap_memory(header, tbl_size);
 
 			*oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
 								    oem_size);
 			return 0;
 		}
-		early_acpi_os_unmap_memory(header, tbl_size);
 	}
 	return -1;
 }
 
 void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
-	if (!oem_addr)
-		return;
-
-	__acpi_unmap_table((char *)oem_addr, oem_size);
 }
 #endif
 

+ 4 - 0
arch/x86/kernel/tsc_sync.c

@@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
 	cycles_t start, now, prev, end;
 	int i;
 
+	rdtsc_barrier();
 	start = get_cycles();
+	rdtsc_barrier();
 	/*
 	 * The measurement runs for 20 msecs:
 	 */
@@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
 		 */
 		__raw_spin_lock(&sync_lock);
 		prev = last_tsc;
+		rdtsc_barrier();
 		now = get_cycles();
+		rdtsc_barrier();
 		last_tsc = now;
 		__raw_spin_unlock(&sync_lock);
 
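A note on the tsc_sync.c hunks above: RDTSC is not a serializing instruction, so without a fence the timestamp read can be reordered against the loads and stores around it, and the warp check can end up comparing values taken out of order. rdtsc_barrier() is the kernel's fence for this; the sketch below only illustrates the idea in user space and hard-codes LFENCE, whereas the real rdtsc_barrier() of this era patches in MFENCE or LFENCE at boot depending on the CPU.

#include <stdint.h>

/* Read the TSC with fences on both sides so the read cannot drift
 * relative to the surrounding code (illustrative only, x86 GCC/Clang). */
static inline uint64_t fenced_rdtsc(void)
{
	uint32_t lo, hi;

	asm volatile("lfence" ::: "memory");
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	asm volatile("lfence" ::: "memory");

	return ((uint64_t)hi << 32) | lo;
}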

+ 14 - 2
arch/x86/mach-voyager/voyager_smp.c

@@ -7,6 +7,7 @@
  * This file provides all the same external entries as smp.c but uses
  * the voyager hal to provide the functionality
  */
+#include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
@@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void)
 	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
+static void voyager_send_call_func(cpumask_t callmask)
+{
+	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
+}
+
+static void voyager_send_call_func_single(int cpu)
+{
+	send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
+}
+
 struct smp_ops smp_ops = {
 	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = voyager_smp_prepare_cpus,
@@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = {
 	.smp_send_stop = voyager_smp_send_stop,
 	.smp_send_reschedule = voyager_smp_send_reschedule,
 
-	.send_call_func_ipi = native_send_call_func_ipi,
-	.send_call_func_single_ipi = native_send_call_func_single_ipi,
+	.send_call_func_ipi = voyager_send_call_func,
+	.send_call_func_single_ipi = voyager_send_call_func_single,
 };
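A note on the voyager_smp.c hunk above: with USE_GENERIC_SMP_HELPERS now enabled for every SMP x86 configuration (see the Kconfig hunk), Voyager can no longer borrow the native APIC call-function IPI senders and instead provides hooks that translate the cpumask into a VIC CPI mask. The generic call-function code reaches these hooks through smp_ops, roughly like the simplified wrappers below; these are illustrative, the real wrappers live in the x86 smp headers.

/* Simplified view: the generic code only sees the smp_ops hooks, so any
 * backend (native APIC or Voyager VIC) can be plugged in underneath. */
static inline void arch_send_call_function_ipi(cpumask_t mask)
{
	smp_ops.send_call_func_ipi(mask);
}

static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}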

+ 1 - 8
drivers/xen/balloon.c

@@ -122,14 +122,7 @@ static struct timer_list balloon_timer;
 static void scrub_page(struct page *page)
 {
 #ifdef CONFIG_XEN_SCRUB_PAGES
-	if (PageHighMem(page)) {
-		void *v = kmap(page);
-		clear_page(v);
-		kunmap(v);
-	} else {
-		void *v = page_address(page);
-		clear_page(v);
-	}
+	clear_highpage(page);
 #endif
 }
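A note on the balloon.c hunk above: clear_highpage() already covers both cases, since kmap_atomic() on a lowmem page simply resolves to its linear address, so the PageHighMem() branch is unnecessary; the open-coded version was also passing the mapped address to kunmap(), which expects the struct page. For reference, clear_highpage() in kernels of this vintage looks roughly like the sketch below (paraphrased from include/linux/highmem.h, not copied verbatim).

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);	/* maps highmem, no-op mapping for lowmem */

	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}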