@@ -135,11 +135,12 @@ void pmd_clear_bad(pmd_t *pmd)
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
  */
-static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
+static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+			   unsigned long addr)
 {
 	pgtable_t token = pmd_pgtable(*pmd);
 	pmd_clear(pmd);
-	pte_free_tlb(tlb, token);
+	pte_free_tlb(tlb, token, addr);
 	tlb->mm->nr_ptes--;
 }
 
@@ -157,7 +158,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		free_pte_range(tlb, pmd);
+		free_pte_range(tlb, pmd, addr);
 	} while (pmd++, addr = next, addr != end);
 
 	start &= PUD_MASK;
@@ -173,7 +174,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 
 	pmd = pmd_offset(pud, start);
 	pud_clear(pud);
-	pmd_free_tlb(tlb, pmd);
+	pmd_free_tlb(tlb, pmd, start);
 }
 
 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -206,7 +207,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 
 	pud = pud_offset(pgd, start);
 	pgd_clear(pgd);
-	pud_free_tlb(tlb, pud);
+	pud_free_tlb(tlb, pud, start);
 }
 
 /*