
[S390] memory hotplug: prevent memory zone interleave

This fixes a kernel oops with CONFIG_DEBUG_VM triggered by a
VM_BUG_ON(bad_range()): kernel BUG at mm/page_alloc.c:748.

With memory hotplug on System z, it is possible that the memory
online/offline state is preserved over a system restart, e.g. there
may be offline memory blocks in ZONE_DMA or ZONE_NORMAL. So far,
the offline memory range has always been added to ZONE_MOVABLE during
system start, so that it was possible to have ZONE_MOVABLE interleave
with ZONE_DMA or ZONE_NORMAL. This patch fixes that by checking for
zone overlap before adding memory.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Gerald Schaefer, 13 years ago
commit 892365ab4d

1 changed file with 26 additions and 4 deletions:
  arch/s390/mm/init.c

arch/s390/mm/init.c  +26 -4

@@ -223,16 +223,38 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
-	struct pglist_data *pgdat;
+	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
+	unsigned long start_pfn = PFN_DOWN(start);
+	unsigned long size_pages = PFN_DOWN(size);
 	struct zone *zone;
 	int rc;
 
-	pgdat = NODE_DATA(nid);
-	zone = pgdat->node_zones + ZONE_MOVABLE;
 	rc = vmem_add_mapping(start, size);
 	if (rc)
 		return rc;
-	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
+	for_each_zone(zone) {
+		if (zone_idx(zone) != ZONE_MOVABLE) {
+			/* Add range within existing zone limits */
+			zone_start_pfn = zone->zone_start_pfn;
+			zone_end_pfn = zone->zone_start_pfn +
+				       zone->spanned_pages;
+		} else {
+			/* Add remaining range to ZONE_MOVABLE */
+			zone_start_pfn = start_pfn;
+			zone_end_pfn = start_pfn + size_pages;
+		}
+		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+			continue;
+		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+			   zone_end_pfn - start_pfn : size_pages;
+		rc = __add_pages(nid, zone, start_pfn, nr_pages);
+		if (rc)
+			break;
+		start_pfn += nr_pages;
+		size_pages -= nr_pages;
+		if (!size_pages)
+			break;
+	}
 	if (rc)
 		vmem_remove_mapping(start, size);
 	return rc;
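 	}
 #endif /* CONFIG_MEMORY_HOTPLUG */

To make the clipping arithmetic in the loop above easier to follow, here is a
minimal user-space sketch of the same splitting logic. The zone table, the PFN
values, and the struct layout are illustrative assumptions for this sketch,
not kernel code or kernel API:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's zone bookkeeping (not kernel API). */
struct zone {
	const char *name;
	unsigned long start_pfn;	/* first PFN of the zone */
	unsigned long spanned;		/* number of PFNs spanned */
	int movable;			/* 1 for the ZONE_MOVABLE stand-in */
};

/* Hypothetical layout: DMA and NORMAL have fixed limits, MOVABLE
 * absorbs whatever part of the hot-added range is left over. */
static struct zone zones[] = {
	{ "DMA",     0,       0x80000, 0 },
	{ "NORMAL",  0x80000, 0x80000, 0 },
	{ "MOVABLE", 0,       0,       1 },
};

int main(void)
{
	/* Hot-add range straddling the NORMAL/MOVABLE boundary. */
	unsigned long start_pfn = 0xc0000;
	unsigned long size_pages = 0x80000;
	unsigned long zone_start, zone_end, nr_pages;

	for (size_t i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		struct zone *zone = &zones[i];

		if (!zone->movable) {
			/* Clip the range to the existing zone limits. */
			zone_start = zone->start_pfn;
			zone_end = zone->start_pfn + zone->spanned;
		} else {
			/* ZONE_MOVABLE takes the still-unassigned remainder. */
			zone_start = start_pfn;
			zone_end = start_pfn + size_pages;
		}
		if (start_pfn < zone_start || start_pfn >= zone_end)
			continue;
		nr_pages = (start_pfn + size_pages > zone_end) ?
			   zone_end - start_pfn : size_pages;
		printf("add 0x%lx pages at pfn 0x%lx to ZONE_%s\n",
		       nr_pages, start_pfn, zone->name);
		start_pfn += nr_pages;
		size_pages -= nr_pages;
		if (!size_pages)
			break;
	}
	return 0;
}

With this hypothetical layout, a range hot-added at PFN 0xc0000..0x140000 is
split in two passes: the pages up to the end of ZONE_NORMAL (0x100000) stay in
ZONE_NORMAL, and only the remainder lands in ZONE_MOVABLE, so the zones no
longer interleave the way the pre-patch code allowed.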