@@ -671,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 		set_pmd_at(mm, haddr, pmd, entry);
 		prepare_pmd_huge_pte(pgtable, mm);
 		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		mm->nr_ptes++;
 		spin_unlock(&mm->page_table_lock);
 	}
 
@@ -789,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pmd = pmd_mkold(pmd_wrprotect(pmd));
 	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 	prepare_pmd_huge_pte(pgtable, dst_mm);
+	dst_mm->nr_ptes++;
 
 	ret = 0;
 out_unlock:
@@ -887,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 	}
 	kfree(pages);
 
-	mm->nr_ptes++;
 	smp_wmb(); /* make pte visible before pmd */
 	pmd_populate(mm, pmd, pgtable);
 	page_remove_rmap(page);
@@ -1047,6 +1048,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			VM_BUG_ON(page_mapcount(page) < 0);
 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 			VM_BUG_ON(!PageHead(page));
+			tlb->mm->nr_ptes--;
 			spin_unlock(&tlb->mm->page_table_lock);
 			tlb_remove_page(tlb, page);
 			pte_free(tlb->mm, pgtable);
@@ -1375,7 +1377,6 @@ static int __split_huge_page_map(struct page *page,
 		pte_unmap(pte);
 	}
 
-	mm->nr_ptes++;
 	smp_wmb(); /* make pte visible before pmd */
 	/*
 	 * Up to this point the pmd is present and huge and
@@ -1988,7 +1989,6 @@ static void collapse_huge_page(struct mm_struct *mm,
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache(vma, address, _pmd);
 	prepare_pmd_huge_pte(pgtable, mm);
-	mm->nr_ptes--;
 	spin_unlock(&mm->page_table_lock);
 
 #ifndef CONFIG_NUMA
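
For reference, the rule these hunks establish is that mm->nr_ptes now charges for the preallocated pte table at the moment it is deposited behind a huge pmd, and uncharges when that table is withdrawn and freed; the split, write-protect fallback, and collapse paths merely repurpose or consume an already-counted table, so their increments and decrements go away. What follows is a minimal userspace model of that invariant, not kernel code: struct mm, deposit_pte_table(), split_huge_pmd() and withdraw_pte_table() are illustrative stand-ins, and the 4096-byte allocation merely plays the role of a pte page.

/*
 * Userspace model of the deposit-time nr_ptes accounting above.
 * All names here are illustrative stand-ins, not kernel API.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct mm {
	long nr_ptes;		/* models mm->nr_ptes */
	void *deposited;	/* models the preallocated pte table */
};

/*
 * Models __do_huge_pmd_anonymous_page()/copy_huge_pmd(): the pte
 * table stashed behind the huge pmd is counted right away.
 */
static void deposit_pte_table(struct mm *mm)
{
	mm->deposited = malloc(4096);	/* stand-in for a pte page */
	assert(mm->deposited);
	mm->nr_ptes++;
}

/*
 * Models __split_huge_page_map()/do_huge_pmd_wp_page_fallback():
 * the deposited table is repurposed to back the regular ptes, so
 * the counter is deliberately NOT incremented again here.
 */
static void split_huge_pmd(struct mm *mm)
{
	assert(mm->deposited);	/* already counted at deposit time */
}

/*
 * Models zap_huge_pmd(), or the eventual pte_free() of a table that
 * a split left in place: exactly one decrement per table, either way.
 */
static void withdraw_pte_table(struct mm *mm)
{
	free(mm->deposited);
	mm->deposited = NULL;
	mm->nr_ptes--;
}

int main(void)
{
	struct mm mm = { 0, NULL };

	deposit_pte_table(&mm);
	split_huge_pmd(&mm);	/* no counter change */
	withdraw_pte_table(&mm);

	/* The balance the kernel checks when an mm is torn down. */
	assert(mm.nr_ptes == 0);
	printf("nr_ptes balanced: %ld\n", mm.nr_ptes);
	return 0;
}

Counting at deposit time keeps the teardown balance independent of which path, zap, split, or collapse, ends up consuming the stashed table.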