[PATCH] x86_64: Fix memory hotadd heuristics

This fixes some boot failures on Dell and Unisys systems after memory
hotadd support was added.

 - Set hotadd_percent to 0 by default.  This means anybody using hotadd
   memory needs to specify the value on the command line.  That's
   because there are lots of Intel boxes which have a bogus hotplug area
   in their SRAT, and with the old non-zero default they would waste a
   lot of memory on it.
 - Fix the calculation of how much memory to use when the hotplug area
   exceeds hotadd_percent (see the sketch after this list).
 - Fix the fallback if memory hotadd is not compiled in.

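On the calculation fix above: after this patch, hotadd_enough_memory() hands out
only what is left of the hotadd_percent budget and pulls nd->end back to match
(the percentage itself now has to be given explicitly on the command line,
presumably via the numa=hotadd= boot option). What follows is a minimal
user-space sketch of that clamping arithmetic only, not the kernel code itself;
the addresses, sizes, PAGE_SIZE and the assumed 64-byte struct page are made up
for illustration and it assumes 64-bit unsigned long.

/* Sketch of the post-patch clamping arithmetic (assumptions: 4K pages,
 * 64-byte struct page, made-up addresses and sizes, 64-bit longs). */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define STRUCT_PAGE_SZ  64UL            /* stand-in for sizeof(struct page) */

int main(void)
{
	unsigned long start = 4UL << 30;            /* hypothetical hotplug area  */
	unsigned long end   = 20UL << 30;           /* taken from a (bogus) SRAT  */
	unsigned long usable_bytes = 2UL << 30;     /* memory present at boot     */
	unsigned long hotadd_percent = 10;          /* given on the command line  */
	unsigned long allocated = 0;                /* running total, as in srat.c */

	unsigned long pages = (end - start) / PAGE_SIZE;
	unsigned long mem = pages * STRUCT_PAGE_SZ; /* memmap bytes to reserve    */
	unsigned long allowed = (usable_bytes / 100) * hotadd_percent;

	if (allocated + mem > allowed) {
		if (allocated >= allowed) {
			puts("budget exhausted, hotplug area rejected");
			return 0;
		}
		/* New calculation: use only the remaining budget and pull
		   the end of the hotplug area back accordingly. */
		unsigned long range = allowed - allocated;
		pages = range / PAGE_SIZE;
		mem = pages * STRUCT_PAGE_SZ;
		end = start + range;
	}
	allocated += mem;
	printf("clamped to %lu pages, memmap %lu bytes, new end %#lx\n",
	       pages, mem, end);
	return 0;
}

With these made-up numbers the area is limited to the remaining budget; the old
formula divided (allowed - allocated + mem) by sizeof(struct page) and so sized
both the memmap and the node end incorrectly.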
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Andi Kleen, 19 years ago
parent commit fad7906d16

1 changed file with 11 additions and 4 deletions:
  arch/x86_64/mm/srat.c  +11 -4

@@ -34,7 +34,10 @@ static nodemask_t nodes_found __initdata;
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
 static int found_add_area __initdata;
-int hotadd_percent __initdata = 10;
+int hotadd_percent __initdata = 0;
+#ifndef RESERVE_HOTADD
+#define hotadd_percent 0	/* Ignore all settings */
+#endif
 static u8 pxm2node[256] = { [0 ... 255] = 0xff };
 
 /* Too small nodes confuse the VM badly. Usually they result
@@ -103,6 +106,7 @@ static __init void bad_srat(void)
 	int i;
 	printk(KERN_ERR "SRAT: SRAT not used.\n");
 	acpi_numa = -1;
+	found_add_area = 0;
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
 		apicid_to_node[i] = NUMA_NO_NODE;
 	for (i = 0; i < MAX_NUMNODES; i++)
@@ -154,7 +158,8 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 	int pxm, node;
 	if (srat_disabled())
 		return;
-	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {		bad_srat();
+	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+		bad_srat();
 		return;
 	}
 	if (pa->flags.enabled == 0)
@@ -191,15 +196,17 @@ static int hotadd_enough_memory(struct bootnode *nd)
 	allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE;
 	allowed = (allowed / 100) * hotadd_percent;
 	if (allocated + mem > allowed) {
+		unsigned long range;
 		/* Give them at least part of their hotadd memory upto hotadd_percent
 		   It would be better to spread the limit out
 		   over multiple hotplug areas, but that is too complicated
 		   right now */
 		if (allocated >= allowed)
 			return 0;
-		pages = (allowed - allocated + mem) / sizeof(struct page);
+		range = allowed - allocated;
+		pages = (range / PAGE_SIZE);
 		mem = pages * sizeof(struct page);
-		nd->end = nd->start + pages*PAGE_SIZE;
+		nd->end = nd->start + range;
 	}
 	/* Not completely fool proof, but a good sanity check */
 	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);