@@ -804,6 +804,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*src_pmd)) {
 			int err;
+			VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
 			err = copy_huge_pmd(dst_mm, src_mm,
 					    dst_pmd, src_pmd, addr, vma);
 			if (err == -ENOMEM)
@@ -1015,9 +1016,10 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 	do {
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd)) {
-			if (next-addr != HPAGE_PMD_SIZE)
+			if (next-addr != HPAGE_PMD_SIZE) {
+				VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
 				split_huge_page_pmd(vma->vm_mm, pmd);
-			else if (zap_huge_pmd(tlb, vma, pmd)) {
+			} else if (zap_huge_pmd(tlb, vma, pmd)) {
 				(*zap_work)--;
 				continue;
 			}