@@ -35,6 +35,7 @@ struct map_range {
 	unsigned page_size_mask;
 };
 
+static int page_size_mask;
 /*
  * First calculate space needed for kernel direct mapping page tables to cover
  * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
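The hunk above promotes page_size_mask to a file-scope static (its per-call twin is deleted from init_memory_mapping() in the last hunk below), so the probe result is computed once and shared by every mapping pass. Don't confuse it with the unrelated per-range member struct map_range.page_size_mask visible in the context lines. The bits are indexed by page-table level; a minimal sketch of how a consumer would pick the largest permitted page size (the helper name is illustrative and not part of the patch; PG_LEVEL_2M/PG_LEVEL_1G are the existing constants from asm/pgtable_types.h):

	/* Illustrative only -- not from this patch. */
	static unsigned long max_direct_map_size(int mask)
	{
		if (mask & (1 << PG_LEVEL_1G))
			return 1UL << 30;	/* 1 GiB pages (gbpages) */
		if (mask & (1 << PG_LEVEL_2M))
			return 1UL << 21;	/* 2 MiB large pages (PSE) */
		return 1UL << 12;		/* 4 KiB base pages */
	}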
@@ -94,6 +95,30 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
 		(pgt_buf_top << PAGE_SHIFT) - 1);
 }
 
+void probe_page_size_mask(void)
+{
+#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
+	/*
+	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+	 * This will simplify cpa(), which otherwise needs to support splitting
+	 * large pages into small in interrupt context, etc.
+	 */
+	if (direct_gbpages)
+		page_size_mask |= 1 << PG_LEVEL_1G;
+	if (cpu_has_pse)
+		page_size_mask |= 1 << PG_LEVEL_2M;
+#endif
+
+	/* Enable PSE if available */
+	if (cpu_has_pse)
+		set_in_cr4(X86_CR4_PSE);
+
+	/* Enable PGE if available */
+	if (cpu_has_pge) {
+		set_in_cr4(X86_CR4_PGE);
+		__supported_pte_mask |= _PAGE_GLOBAL;
+	}
+}
 void __init native_pagetable_reserve(u64 start, u64 end)
 {
 	memblock_reserve(start, end - start);
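probe_page_size_mask() now carries the CR4 enabling and mask probing that the final hunk strips out of init_memory_mapping(), and nothing in this file calls it, so it must run once before the first direct mapping is built or the mask stays zero and everything is mapped with 4 KiB pages. A sketch of the expected ordering at the call site, assuming the companion change goes into setup_arch() in arch/x86/kernel/setup.c (not shown in this excerpt, so treat the exact placement as an assumption):

	/* In setup_arch(), before the direct mapping is created: */
	probe_page_size_mask();
	/* ... */
	init_memory_mapping(0, max_pfn << PAGE_SHIFT);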
@@ -129,45 +154,15 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
 unsigned long __init_refok init_memory_mapping(unsigned long start,
 					       unsigned long end)
 {
-	unsigned long page_size_mask = 0;
 	unsigned long start_pfn, end_pfn;
 	unsigned long ret = 0;
 	unsigned long pos;
-
 	struct map_range mr[NR_RANGE_MR];
 	int nr_range, i;
-	int use_pse, use_gbpages;
 
 	printk(KERN_INFO "init_memory_mapping: [mem %#010lx-%#010lx]\n",
 	       start, end - 1);
 
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
-	/*
-	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-	 * This will simplify cpa(), which otherwise needs to support splitting
-	 * large pages into small in interrupt context, etc.
-	 */
-	use_pse = use_gbpages = 0;
-#else
-	use_pse = cpu_has_pse;
-	use_gbpages = direct_gbpages;
-#endif
-
-	/* Enable PSE if available */
-	if (cpu_has_pse)
-		set_in_cr4(X86_CR4_PSE);
-
-	/* Enable PGE if available */
-	if (cpu_has_pge) {
-		set_in_cr4(X86_CR4_PGE);
-		__supported_pte_mask |= _PAGE_GLOBAL;
-	}
-
-	if (use_gbpages)
-		page_size_mask |= 1 << PG_LEVEL_1G;
-	if (use_pse)
-		page_size_mask |= 1 << PG_LEVEL_2M;
-
 	memset(mr, 0, sizeof(mr));
 	nr_range = 0;
 
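The block deleted above moved, nearly verbatim, into probe_page_size_mask(); the rewrite is behavior-preserving. The old code picked the #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) arm to force small pages, while the new function guards the large-page bits with the De Morgan negation of that test, so exactly the same configurations end up 4 KiB-only:

	/* The two guards select complementary arms of the same predicate: */
	#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
	/* old: use_pse = use_gbpages = 0, i.e. no large-page bits set */
	#endif
	#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
	/* new: the only place PG_LEVEL_2M / PG_LEVEL_1G bits may be set */
	#endif

The set_in_cr4(X86_CR4_PSE)/set_in_cr4(X86_CR4_PGE) calls and the _PAGE_GLOBAL bit in __supported_pte_mask move over unchanged; the functional difference is that they now run once at probe time instead of on every init_memory_mapping() call.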