@@ -84,6 +84,7 @@
  */
 DEFINE_SPINLOCK(xen_reservation_lock);
 
+#ifdef CONFIG_X86_32
 /*
  * Identity map, in addition to plain kernel map.  This needs to be
  * large enough to allocate page table pages to allocate the rest.
@@ -91,7 +92,7 @@ DEFINE_SPINLOCK(xen_reservation_lock);
  */
 #define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
-
+#endif
 #ifdef CONFIG_X86_64
 /* l3 pud for userspace vsyscall mapping */
 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
@@ -1628,7 +1629,7 @@ static void set_page_prot(void *addr, pgprot_t prot)
 	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
 		BUG();
 }
-
+#ifdef CONFIG_X86_32
 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 {
 	unsigned pmdidx, pteidx;
@@ -1679,7 +1680,7 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 
 	set_page_prot(pmd, PAGE_KERNEL_RO);
 }
-
+#endif
 void __init xen_setup_machphys_mapping(void)
 {
 	struct xen_machphys_mapping mapping;
@@ -1765,14 +1766,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	/* Note that we don't do anything with level1_fixmap_pgt which
 	 * we don't need. */
 
-	/* Set up identity map */
-	xen_map_identity_early(level2_ident_pgt, max_pfn);
-
 	/* Make pagetable pieces RO */
 	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+	set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
 