@@ -474,6 +474,51 @@ static void __init memory_map_top_down(unsigned long map_start,
 		init_range_memory_mapping(real_end, map_end);
 }
 
+/**
+ * memory_map_bottom_up - Map [map_start, map_end) bottom up
+ * @map_start: start address of the target memory range
+ * @map_end: end address of the target memory range
+ *
+ * This function will set up the direct mapping for the memory
+ * range [map_start, map_end) in bottom-up fashion. Since bottom-up
+ * allocation is limited to the region above the kernel, the page
+ * tables will be allocated just above the kernel, and the memory
+ * in [map_start, map_end) is mapped from bottom to top.
+ */
+static void __init memory_map_bottom_up(unsigned long map_start,
+					unsigned long map_end)
+{
+	unsigned long next, new_mapped_ram_size, start;
+	unsigned long mapped_ram_size = 0;
+	/* step_size needs to be small so pgt_buf from BRK can cover it */
+	unsigned long step_size = PMD_SIZE;
+
+	start = map_start;
+	min_pfn_mapped = start >> PAGE_SHIFT;
+
+	/*
+	 * We start from the bottom (@map_start) and go to the top (@map_end).
+	 * memblock_find_in_range() gets us a block of RAM from the end of
+	 * RAM in [min_pfn_mapped, max_pfn_mapped) to be used as new pages
+	 * for the page tables.
+	 */
+	while (start < map_end) {
+		if (map_end - start > step_size) {
+			next = round_up(start + 1, step_size);
+			if (next > map_end)
+				next = map_end;
+		} else
+			next = map_end;
+
+		new_mapped_ram_size = init_range_memory_mapping(start, next);
+		start = next;
+
+		if (new_mapped_ram_size > mapped_ram_size)
+			step_size = get_new_step_size(step_size);
+		mapped_ram_size += new_mapped_ram_size;
+	}
+}
+
 void __init init_mem_mapping(void)
 {
 	unsigned long end;
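
To make the chunking behaviour of memory_map_bottom_up() above easier to follow, here is a small stand-alone sketch of the same loop. It is not kernel code: round_up_ul(), the example addresses, and the "<<= 5" growth are assumptions for illustration; in the kernel the growth comes from get_new_step_size(), which is not part of this hunk, and the mapped size comes from init_range_memory_mapping().

#include <stdio.h>

#define PMD_SIZE	(2UL << 20)	/* 2 MiB, matching the initial step_size above */

/* power-of-two round up, standing in for the kernel's round_up() */
static unsigned long round_up_ul(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned long map_start = 16UL << 20;	/* pretend the kernel ends at 16 MiB */
	unsigned long map_end = 1UL << 30;	/* pretend usable RAM ends at 1 GiB */
	unsigned long step_size = PMD_SIZE;
	unsigned long mapped = 0;
	unsigned long start = map_start;
	unsigned long next, new_mapped;

	while (start < map_end) {
		if (map_end - start > step_size) {
			next = round_up_ul(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else {
			next = map_end;
		}

		/* stand-in for init_range_memory_mapping(); assume no memory holes */
		new_mapped = next - start;
		printf("map [%#lx, %#lx), step_size %#lx\n", start, next, step_size);
		start = next;

		/* hypothetical growth factor; the kernel uses get_new_step_size() */
		if (new_mapped > mapped)
			step_size <<= 5;
		mapped += new_mapped;
	}
	return 0;
}

Running this prints a handful of ranges: the first chunk is a single 2 MiB step served entirely out of the BRK page-table buffer, and each later chunk is much larger because its page tables can be placed in memory that earlier iterations already mapped.
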
@@ -489,8 +534,25 @@ void __init init_mem_mapping(void)
 	/* the ISA range is always mapped regardless of memory holes */
 	init_memory_mapping(0, ISA_END_ADDRESS);
 
-	/* setup direct mapping for range [ISA_END_ADDRESS, end) in top-down*/
-	memory_map_top_down(ISA_END_ADDRESS, end);
+	/*
+	 * If memblock is allocating in bottom-up direction, set up the
+	 * direct mapping bottom-up as well; otherwise set it up top-down.
+	 */
+	if (memblock_bottom_up()) {
+		unsigned long kernel_end = __pa_symbol(_end);
+
+		/*
+		 * We need two separate calls here because we want to allocate
+		 * the page tables above the kernel. So we first map [kernel_end,
+		 * end) so that memory above the kernel is mapped as soon as
+		 * possible, and then use the page tables allocated above the
+		 * kernel to map [ISA_END_ADDRESS, kernel_end).
+		 */
+		memory_map_bottom_up(kernel_end, end);
+		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
+	} else {
+		memory_map_top_down(ISA_END_ADDRESS, end);
+	}
 
 #ifdef CONFIG_X86_64
 	if (max_pfn > max_low_pfn) {
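
memblock_bottom_up() in the hunk above queries which direction memblock hands out memory. As a rough illustration of the semantics the patch relies on, here is a minimal stand-alone sketch of such a direction switch; the names, the single global flag, and the setter are hypothetical stand-ins for the real memblock helpers, which are not part of this hunk.

#include <stdbool.h>

/*
 * Illustrative sketch only: a boot-time direction flag with a query
 * helper playing the role of memblock_bottom_up() above. The real
 * helpers live in the memblock code and are not shown here.
 */
static bool bottom_up_mode;

/* hypothetically called early in boot by code that wants low, above-kernel allocations */
static void set_bottom_up_sketch(bool enable)
{
	bottom_up_mode = enable;
}

static bool bottom_up_sketch(void)
{
	return bottom_up_mode;
}

With the flag set, memblock satisfies allocations from just above the kernel, which is why init_mem_mapping() maps [kernel_end, end) before [ISA_END_ADDRESS, kernel_end): the page-table pages handed out for the second call then always fall in memory the first call has already mapped.
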