|
@@ -2515,21 +2515,18 @@ retry:
|
|
|
hugepage_add_new_anon_rmap(page, vma, address);
|
|
|
}
|
|
|
} else {
|
|
|
+ /*
|
|
|
+ * If a memory error occurs between mmap() and fault, some processes
|
|
|
+ * may not have a hwpoisoned swap entry for the errored virtual address.
|
|
|
+ * So we need to block the hugepage fault by checking the PG_hwpoison bit.
|
|
|
+ */
|
|
|
+ if (unlikely(PageHWPoison(page))) {
|
|
|
+ ret = VM_FAULT_HWPOISON;
|
|
|
+ goto backout_unlocked;
|
|
|
+ }
|
|
|
page_dup_rmap(page);
|
|
|
}
|
|
|
|
|
|
- /*
|
|
|
- * Since memory error handler replaces pte into hwpoison swap entry
|
|
|
- * at the time of error handling, a process which reserved but not have
|
|
|
- * the mapping to the error hugepage does not have hwpoison swap entry.
|
|
|
- * So we need to block accesses from such a process by checking
|
|
|
- * PG_hwpoison bit here.
|
|
|
- */
|
|
|
- if (unlikely(PageHWPoison(page))) {
|
|
|
- ret = VM_FAULT_HWPOISON;
|
|
|
- goto backout_unlocked;
|
|
|
- }
|
|
|
-
|
|
|
/*
|
|
|
* If we are going to COW a private mapping later, we examine the
|
|
|
* pending reservations for this page now. This will ensure that
|