|
@@ -418,27 +418,27 @@ static unsigned long __init get_new_step_size(unsigned long step_size)
|
|
|
return step_size << 5;
|
|
|
}
|
|
|
|
|
|
-void __init init_mem_mapping(void)
|
|
|
+/**
|
|
|
+ * memory_map_top_down - Map [map_start, map_end) top down
|
|
|
+ * @map_start: start address of the target memory range
|
|
|
+ * @map_end: end address of the target memory range
|
|
|
+ *
|
|
|
+ * This function will set up direct mapping for memory range
|
|
|
+ * [map_start, map_end) in top-down. That said, the page tables
|
|
|
+ * will be allocated at the end of the memory, and we map the
|
|
|
+ * memory in top-down.
|
|
|
+ */
|
|
|
+static void __init memory_map_top_down(unsigned long map_start,
|
|
|
+ unsigned long map_end)
|
|
|
{
|
|
|
- unsigned long end, real_end, start, last_start;
|
|
|
+ unsigned long real_end, start, last_start;
|
|
|
unsigned long step_size;
|
|
|
unsigned long addr;
|
|
|
unsigned long mapped_ram_size = 0;
|
|
|
unsigned long new_mapped_ram_size;
|
|
|
|
|
|
- probe_page_size_mask();
|
|
|
-
|
|
|
-#ifdef CONFIG_X86_64
|
|
|
- end = max_pfn << PAGE_SHIFT;
|
|
|
-#else
|
|
|
- end = max_low_pfn << PAGE_SHIFT;
|
|
|
-#endif
|
|
|
-
|
|
|
- /* the ISA range is always mapped regardless of memory holes */
|
|
|
- init_memory_mapping(0, ISA_END_ADDRESS);
|
|
|
-
|
|
|
/* xen has big range in reserved near end of ram, skip it at first.*/
|
|
|
- addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
|
|
|
+ addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
|
|
|
real_end = addr + PMD_SIZE;
|
|
|
|
|
|
/* step_size need to be small so pgt_buf from BRK could cover it */
|
|
@@ -453,13 +453,13 @@ void __init init_mem_mapping(void)
|
|
|
* end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
|
|
|
* for page table.
|
|
|
*/
|
|
|
- while (last_start > ISA_END_ADDRESS) {
|
|
|
+ while (last_start > map_start) {
|
|
|
if (last_start > step_size) {
|
|
|
start = round_down(last_start - 1, step_size);
|
|
|
- if (start < ISA_END_ADDRESS)
|
|
|
- start = ISA_END_ADDRESS;
|
|
|
+ if (start < map_start)
|
|
|
+ start = map_start;
|
|
|
} else
|
|
|
- start = ISA_END_ADDRESS;
|
|
|
+ start = map_start;
|
|
|
new_mapped_ram_size = init_range_memory_mapping(start,
|
|
|
last_start);
|
|
|
last_start = start;
|
|
@@ -470,8 +470,27 @@ void __init init_mem_mapping(void)
|
|
|
mapped_ram_size += new_mapped_ram_size;
|
|
|
}
|
|
|
|
|
|
- if (real_end < end)
|
|
|
- init_range_memory_mapping(real_end, end);
|
|
|
+ if (real_end < map_end)
|
|
|
+ init_range_memory_mapping(real_end, map_end);
|
|
|
+}
|
|
|
+
|
|
|
+void __init init_mem_mapping(void)
|
|
|
+{
|
|
|
+ unsigned long end;
|
|
|
+
|
|
|
+ probe_page_size_mask();
|
|
|
+
|
|
|
+#ifdef CONFIG_X86_64
|
|
|
+ end = max_pfn << PAGE_SHIFT;
|
|
|
+#else
|
|
|
+ end = max_low_pfn << PAGE_SHIFT;
|
|
|
+#endif
|
|
|
+
|
|
|
+ /* the ISA range is always mapped regardless of memory holes */
|
|
|
+ init_memory_mapping(0, ISA_END_ADDRESS);
|
|
|
+
|
|
|
+	/* set up direct mapping for range [ISA_END_ADDRESS, end) in top-down */
|
|
|
+ memory_map_top_down(ISA_END_ADDRESS, end);
|
|
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
if (max_pfn > max_low_pfn) {
|