@@ -764,3 +764,112 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 	return addr;
 }
 
+#ifdef CONFIG_X86_32
+# ifdef CONFIG_X86_PAE
+#  define MAX_ARCH_PFN		(1ULL<<(36-PAGE_SHIFT))
+# else
+#  define MAX_ARCH_PFN		(1ULL<<(32-PAGE_SHIFT))
+# endif
+#else /* CONFIG_X86_32 */
+# define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
+#endif
+
+/*
+ * Last pfn which the user wants to use.
+ */
+unsigned long __initdata end_user_pfn = MAX_ARCH_PFN;
+
+/*
+ * Find the highest page frame number we have available
+ */
+unsigned long __init e820_end_of_ram(void)
+{
+	unsigned long last_pfn;
+	unsigned long max_arch_pfn = MAX_ARCH_PFN;
+
+	last_pfn = find_max_pfn_with_active_regions();
+
+	if (last_pfn > max_arch_pfn)
+		last_pfn = max_arch_pfn;
+	if (last_pfn > end_user_pfn)
+		last_pfn = end_user_pfn;
+
+	printk(KERN_INFO "last_pfn = %lu max_arch_pfn = %lu\n",
+			 last_pfn, max_arch_pfn);
+	return last_pfn;
+}
+
+/*
+ * Finds an active region in the address range from start_pfn to last_pfn and
+ * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
+ */
+int __init e820_find_active_region(const struct e820entry *ei,
+				   unsigned long start_pfn,
+				   unsigned long last_pfn,
+				   unsigned long *ei_startpfn,
+				   unsigned long *ei_endpfn)
+{
+	u64 align = PAGE_SIZE;
+
+	*ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
+	*ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;
+
+	/* Skip map entries smaller than a page */
+	if (*ei_startpfn >= *ei_endpfn)
+		return 0;
+
+	/* Skip if map is outside the node */
+	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
+	    *ei_startpfn >= last_pfn)
+		return 0;
+
+	/* Check for overlaps */
+	if (*ei_startpfn < start_pfn)
+		*ei_startpfn = start_pfn;
+	if (*ei_endpfn > last_pfn)
+		*ei_endpfn = last_pfn;
+
+	/* Obey end_user_pfn to save on memmap */
+	if (*ei_startpfn >= end_user_pfn)
+		return 0;
+	if (*ei_endpfn > end_user_pfn)
+		*ei_endpfn = end_user_pfn;
+
+	return 1;
+}
+
+/* Walk the e820 map and register active regions within a node */
+void __init e820_register_active_regions(int nid, unsigned long start_pfn,
+					 unsigned long last_pfn)
+{
+	unsigned long ei_startpfn;
+	unsigned long ei_endpfn;
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++)
+		if (e820_find_active_region(&e820.map[i],
+					    start_pfn, last_pfn,
+					    &ei_startpfn, &ei_endpfn))
+			add_active_range(nid, ei_startpfn, ei_endpfn);
+}
+
+/*
+ * Find the hole size (in bytes) in the memory range.
+ * @start: starting address of the memory range to scan
+ * @end: ending address of the memory range to scan
+ */
+u64 __init e820_hole_size(u64 start, u64 end)
+{
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long last_pfn = end >> PAGE_SHIFT;
+	unsigned long ei_startpfn, ei_endpfn, ram = 0;
+	int i;
+
+	for (i = 0; i < e820.nr_map; i++) {
+		if (e820_find_active_region(&e820.map[i],
+					    start_pfn, last_pfn,
+					    &ei_startpfn, &ei_endpfn))
+			ram += ei_endpfn - ei_startpfn;
+	}
+	return end - start - ((u64)ram << PAGE_SHIFT);
+}
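
For reference, a minimal userspace sketch of the arithmetic this hunk adds: clamp each map entry to page boundaries and to the scanned window, sum the usable pages, and report the remainder as the hole, as e820_find_active_region() and e820_hole_size() do above. The region layout, struct name, and addresses here are illustrative assumptions, not part of the patch or of the kernel's e820 structures.

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

	/* Hypothetical stand-in for one RAM entry of a firmware memory map. */
	struct region {
		uint64_t addr;
		uint64_t size;
	};

	int main(void)
	{
		/* Assumed layout: 640 KiB low RAM, then RAM from 1 MiB to 512 MiB. */
		struct region map[] = {
			{ 0x00000000, 0x000a0000 },
			{ 0x00100000, 0x1ff00000 },
		};
		uint64_t start = 0, end = 0x20000000;	/* scan the first 512 MiB */
		uint64_t ram = 0;

		for (unsigned i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
			/* Round the entry inward to whole pages, as the patch does. */
			uint64_t spfn = (map[i].addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
			uint64_t epfn = (map[i].addr + map[i].size) >> PAGE_SHIFT;

			/* Clamp to the scanned window. */
			if (spfn < (start >> PAGE_SHIFT))
				spfn = start >> PAGE_SHIFT;
			if (epfn > (end >> PAGE_SHIFT))
				epfn = end >> PAGE_SHIFT;
			if (spfn < epfn)
				ram += epfn - spfn;
		}

		/* Same arithmetic as e820_hole_size(): bytes scanned minus usable RAM. */
		printf("hole bytes: %llu\n",
		       (unsigned long long)(end - start - (ram << PAGE_SHIFT)));
		return 0;
	}

With the assumed layout this prints 393216, i.e. the 0xa0000-0x100000 gap between the two regions.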