@@ -1701,9 +1701,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
 			pte_t pte;
 
-			if (pfn > max_pfn_mapped)
-				max_pfn_mapped = pfn;
-
 			if (!pte_none(pte_page[pteidx]))
 				continue;
 
@@ -1761,6 +1758,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 	pud_t *l3;
 	pmd_t *l2;
 
+	/* max_pfn_mapped is the last pfn mapped in the initial memory
+	 * mappings. Considering that on Xen after the kernel mappings we
+	 * have the mappings of some pages that don't exist in pfn space, we
+	 * set max_pfn_mapped to the last real pfn mapped. */
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+
 	/* Zap identity mapping */
 	init_level4_pgt[0] = __pgd(0);
 
@@ -1865,9 +1868,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 	initial_kernel_pmd =
 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
-				  xen_start_info->nr_pt_frames * PAGE_SIZE +
-				  512*1024);
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
 
 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
 	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
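
For reference, PFN_DOWN() just shifts a physical address right by PAGE_SHIFT, so the new assignment stores the page frame number of the page holding xen_start_info->mfn_list; that single value replaces both the incremental tracking removed from xen_map_identity_early() and the pt_base based estimate in the 32-bit path. The standalone sketch below, which is not part of the patch, only illustrates that arithmetic; the PFN_DOWN/PAGE_SHIFT definitions mirror the generic kernel ones and the physical address is a made-up example.

/* Sketch only: what PFN_DOWN(__pa(xen_start_info->mfn_list)) evaluates to,
 * assuming 4 KiB pages; the address below is hypothetical. */
#include <stdio.h>

#define PAGE_SHIFT	12			/* 4 KiB pages on x86 */
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)	/* as in include/linux/pfn.h */

int main(void)
{
	/* Hypothetical result of __pa(xen_start_info->mfn_list). */
	unsigned long mfn_list_pa = 0x01c00000UL;	/* 28 MiB, for example */

	/* The pfn of the page containing it becomes max_pfn_mapped. */
	printf("max_pfn_mapped = %lu\n", PFN_DOWN(mfn_list_pa));	/* prints 7168 */
	return 0;
}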