|
@@ -196,9 +196,11 @@ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
|
|
|
|
|
|
/* When we populate back during bootup, the amount of pages can vary. The
|
|
|
* max we have is seen is 395979, but that does not mean it can't be more.
|
|
|
- * But some machines can have 3GB I/O holes even. So lets reserve enough
|
|
|
- * for 4GB of I/O and E820 holes. */
|
|
|
-RESERVE_BRK(p2m_populated, PMD_SIZE * 4);
|
|
|
+ * Some machines can even have 3GB I/O holes. With early_can_reuse_p2m_middle
|
|
|
+ * it can re-use the Xen-provided mfn_list array, so we only need to allocate at
|
|
|
+ * most three P2M top nodes. */
|
|
|
+RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
|
|
|
+
|
|
|
static inline unsigned p2m_top_index(unsigned long pfn)
|
|
|
{
|
|
|
BUG_ON(pfn >= MAX_P2M_PFN);
|
|
@@ -575,12 +577,99 @@ static bool __init early_alloc_p2m(unsigned long pfn)
|
|
|
}
|
|
|
return true;
|
|
|
}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Skim over the P2M tree looking at pages that are either filled with
|
|
|
+ * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
|
|
|
+ * replace the P2M leaf with a p2m_missing or p2m_identity.
|
|
|
+ * Stick the old page in the new P2M tree location.
|
|
|
+ */
|
|
|
+bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn)
|
|
|
+{
|
|
|
+ unsigned topidx;
|
|
|
+ unsigned mididx;
|
|
|
+ unsigned ident_pfns;
|
|
|
+ unsigned inv_pfns;
|
|
|
+ unsigned long *p2m;
|
|
|
+ unsigned long *mid_mfn_p;
|
|
|
+ unsigned idx;
|
|
|
+ unsigned long pfn;
|
|
|
+
|
|
|
+ /* We only look when this entails a P2M middle layer */
|
|
|
+ if (p2m_index(set_pfn))
|
|
|
+ return false;
|
|
|
+
|
|
|
+ for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
|
|
|
+ topidx = p2m_top_index(pfn);
|
|
|
+
|
|
|
+ if (!p2m_top[topidx])
|
|
|
+ continue;
|
|
|
+
|
|
|
+ if (p2m_top[topidx] == p2m_mid_missing)
|
|
|
+ continue;
|
|
|
+
|
|
|
+ mididx = p2m_mid_index(pfn);
|
|
|
+ p2m = p2m_top[topidx][mididx];
|
|
|
+ if (!p2m)
|
|
|
+ continue;
|
|
|
+
|
|
|
+ if ((p2m == p2m_missing) || (p2m == p2m_identity))
|
|
|
+ continue;
|
|
|
+
|
|
|
+ if ((unsigned long)p2m == INVALID_P2M_ENTRY)
|
|
|
+ continue;
|
|
|
+
|
|
|
+ ident_pfns = 0;
|
|
|
+ inv_pfns = 0;
|
|
|
+ for (idx = 0; idx < P2M_PER_PAGE; idx++) {
|
|
|
+ /* IDENTITY_PFNs are 1:1 */
|
|
|
+ if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
|
|
|
+ ident_pfns++;
|
|
|
+ else if (p2m[idx] == INVALID_P2M_ENTRY)
|
|
|
+ inv_pfns++;
|
|
|
+ else
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
|
|
|
+ goto found;
|
|
|
+ }
|
|
|
+ return false;
|
|
|
+found:
|
|
|
+ /* Found one, replace old with p2m_identity or p2m_missing */
|
|
|
+ p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
|
|
|
+ /* And the other for save/restore.. */
|
|
|
+ mid_mfn_p = p2m_top_mfn_p[topidx];
|
|
|
+ /* NOTE: Even if it is a p2m_identity it should still point to
|
|
|
+ * a page filled with INVALID_P2M_ENTRY entries. */
|
|
|
+ mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);
|
|
|
+
|
|
|
+ /* Reset where we want to stick the old page in. */
|
|
|
+ topidx = p2m_top_index(set_pfn);
|
|
|
+ mididx = p2m_mid_index(set_pfn);
|
|
|
+
|
|
|
+ /* This shouldn't happen */
|
|
|
+ if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
|
|
|
+ early_alloc_p2m(set_pfn);
|
|
|
+
|
|
|
+ if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
|
|
|
+ return false;
|
|
|
+
|
|
|
+ p2m_init(p2m);
|
|
|
+ p2m_top[topidx][mididx] = p2m;
|
|
|
+ mid_mfn_p = p2m_top_mfn_p[topidx];
|
|
|
+ mid_mfn_p[mididx] = virt_to_mfn(p2m);
|
|
|
+
|
|
|
+ return true;
|
|
|
+}
|
|
|
bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
|
|
|
{
|
|
|
if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
|
|
|
if (!early_alloc_p2m(pfn))
|
|
|
return false;
|
|
|
|
|
|
+ if (early_can_reuse_p2m_middle(pfn, mfn))
|
|
|
+ return __set_phys_to_machine(pfn, mfn);
|
|
|
+
|
|
|
if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
|
|
|
return false;
|
|
|
|