@@ -146,6 +146,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 
 	pmd = pmd_offset(pud, addr);
 	do {
+		unsigned long this_pages;
+
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
@@ -165,8 +167,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		pages += change_pte_range(vma, pmd, addr, next, newprot,
+		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
 				 dirty_accountable, prot_numa, &all_same_nidpid);
+		pages += this_pages;
 
 		/*
 		 * If we are changing protections for NUMA hinting faults then
@@ -174,7 +177,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		 * node. This allows a regular PMD to be handled as one fault
 		 * and effectively batches the taking of the PTL
 		 */
-		if (prot_numa && all_same_nidpid)
+		if (prot_numa && this_pages && all_same_nidpid)
 			change_pmd_protnuma(vma->vm_mm, addr, pmd);
 	} while (pmd++, addr = next, addr != end);
 