@@ -1291,6 +1291,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out_unlock;
 
 	page = pmd_page(pmd);
+	BUG_ON(is_huge_zero_page(page));
 	page_nid = page_to_nid(page);
 	count_vm_numa_event(NUMA_HINT_FAULTS);
 	if (page_nid == this_nid)
@@ -1481,8 +1482,15 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	} else {
 		struct page *page = pmd_page(*pmd);
 
-		/* only check non-shared pages */
+		/*
+		 * Only check non-shared pages. Do not trap faults
+		 * against the zero page. The read-only data is likely
+		 * to be read-cached on the local CPU cache and it is
+		 * less useful to know about local vs remote hits on
+		 * the zero page.
+		 */
 		if (page_mapcount(page) == 1 &&
+		    !is_huge_zero_page(page) &&
 		    !pmd_numa(*pmd)) {
 			entry = pmdp_get_and_clear(mm, addr, pmd);
 			entry = pmd_mknuma(entry);
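
For context (not part of the patch above): the `is_huge_zero_page()` check is cheap enough for these paths because there is only one huge zero page per system, so the test is a single pointer comparison. Since `change_huge_pmd()` now never marks a zero-page PMD pmd_numa, `do_huge_pmd_numa_page()` should never see the zero page, which is what the `BUG_ON` asserts. A minimal sketch of the helper as it appears in kernels of this era (the exact definition may differ between versions):

/*
 * Sketch of the helper the patch relies on (mm/huge_memory.c of this
 * era); shown for illustration, not part of this patch.
 */
static struct page *huge_zero_page __read_mostly;

static inline bool is_huge_zero_page(struct page *page)
{
	/*
	 * The huge zero page is a single global page, so an identity
	 * comparison suffices. ACCESS_ONCE forces a single load of the
	 * pointer in case it is cleared concurrently by the shrinker.
	 */
	return ACCESS_ONCE(huge_zero_page) == page;
}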