|
@@ -88,6 +88,40 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
|
|
|
return nr_range;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Adjust the page_size_mask for a small range so it uses a big
|
|
|
+ * page size instead of a small one if the nearby ranges are RAM too.
|
|
|
+ */
|
|
|
+static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
|
|
|
+ int nr_range)
|
|
|
+{
|
|
|
+ int i;
|
|
|
+
|
|
|
+ for (i = 0; i < nr_range; i++) {
|
|
|
+ if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
|
|
|
+ !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
|
|
|
+ unsigned long start = round_down(mr[i].start, PMD_SIZE);
|
|
|
+ unsigned long end = round_up(mr[i].end, PMD_SIZE);
|
|
|
+
|
|
|
+#ifdef CONFIG_X86_32
|
|
|
+ if ((end >> PAGE_SHIFT) > max_low_pfn)
|
|
|
+ continue;
|
|
|
+#endif
|
|
|
+
|
|
|
+ if (memblock_is_region_memory(start, end - start))
|
|
|
+ mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
|
|
|
+ }
|
|
|
+ if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
|
|
|
+ !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
|
|
|
+ unsigned long start = round_down(mr[i].start, PUD_SIZE);
|
|
|
+ unsigned long end = round_up(mr[i].end, PUD_SIZE);
|
|
|
+
|
|
|
+ if (memblock_is_region_memory(start, end - start))
|
|
|
+ mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
|
|
|
+ }
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
|
|
|
unsigned long start,
|
|
|
unsigned long end)
|
|
@@ -182,6 +216,9 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
|
|
|
nr_range--;
|
|
|
}
|
|
|
|
|
|
+ if (!after_bootmem)
|
|
|
+ adjust_range_page_size_mask(mr, nr_range);
|
|
|
+
|
|
|
for (i = 0; i < nr_range; i++)
|
|
|
printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
|
|
|
mr[i].start, mr[i].end - 1,
|