@@ -81,6 +81,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 		end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
 }
 
+void native_pagetable_reserve(u64 start, u64 end)
+{
+	memblock_x86_reserve_range(start, end, "PGTABLE");
+}
+
 struct map_range {
 	unsigned long start;
 	unsigned long end;
@@ -272,9 +277,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	__flush_tlb_all();
 
+	/*
+	 * Reserve the kernel pagetable pages we used (pgt_buf_start -
+	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
+	 * so that they can be reused for other purposes.
+	 *
+	 * On native it just means calling memblock_x86_reserve_range, on Xen it
+	 * also means marking RW the pagetable pages that we allocated before
+	 * but that haven't been used.
+	 *
+	 * In fact on xen we mark RO the whole range pgt_buf_start -
+	 * pgt_buf_top, because we have to make sure that when
+	 * init_memory_mapping reaches the pagetable pages area, it maps
+	 * RO all the pagetable pages, including the ones that are beyond
+	 * pgt_buf_end at that time.
+	 */
 	if (!after_bootmem && pgt_buf_end > pgt_buf_start)
-		memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
-				pgt_buf_end << PAGE_SHIFT, "PGTABLE");
+		x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
+				PFN_PHYS(pgt_buf_end));
 
 	if (!after_bootmem)
 		early_memtest(start, end);
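
The hunk above only covers arch/x86/mm/init.c; for the call through x86_init.mapping.pagetable_reserve to resolve, the rest of the patch has to declare the hook and point it at native_pagetable_reserve by default. A minimal sketch of that wiring, assuming (rather than quoting) the struct name and ops-table layout:

/* Sketch only -- assumed shape of the hook, not quoted from this hunk. */
struct x86_init_mapping {
	/* reserve a range of addresses used for kernel pagetable pages */
	void (*pagetable_reserve)(u64 start, u64 end);
};

/* Assumed default wiring: bare metal just reserves the range via memblock. */
struct x86_init_ops x86_init __initdata = {
	/* ... other x86_init members unchanged ... */
	.mapping = {
		.pagetable_reserve	= native_pagetable_reserve,
	},
};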
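
The comment in the second hunk spells out what a Xen override has to do on top of the native behaviour: reserve the pages that were actually used and flip the unused tail (pgt_buf_end - pgt_buf_top), which Xen had mapped read-only, back to read-write. An illustrative sketch of such an override, assuming a helper like make_lowmem_page_readwrite; it is not the Xen implementation from this patch:

/* Illustrative Xen-style override -- an assumption, not the patch's code. */
static void __init xen_style_pagetable_reserve(u64 start, u64 end)
{
	u64 pa;

	/* Reserve the pagetable pages that were actually consumed. */
	native_pagetable_reserve(start, end);

	/*
	 * The tail of the pagetable buffer was mapped read-only so that
	 * init_memory_mapping could treat it as pagetable memory; make it
	 * writable again before it is handed back for general use.
	 */
	for (pa = end; pa < PFN_PHYS(pgt_buf_top); pa += PAGE_SIZE)
		make_lowmem_page_readwrite(__va(pa));	/* assumed Xen helper */
}

A Xen setup path would install such an override by assigning x86_init.mapping.pagetable_reserve before init_memory_mapping runs, so the hooked call site in the hunk above picks it up.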