@@ -2792,24 +2792,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
-	if (check_stack_guard_page(vma, address) < 0) {
-		pte_unmap(page_table);
+	pte_unmap(page_table);
+
+	/* Check if we need to add a guard page to the stack */
+	if (check_stack_guard_page(vma, address) < 0)
 		return VM_FAULT_SIGBUS;
-	}
 
+	/* Use the zero-page for reads */
 	if (!(flags & FAULT_FLAG_WRITE)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
 						vma->vm_page_prot));
-		ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
+		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 		if (!pte_none(*page_table))
 			goto unlock;
 		goto setpte;
 	}
 
 	/* Allocate our own private page. */
-	pte_unmap(page_table);
-
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 	page = alloc_zeroed_user_highpage_movable(vma, address);
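
The reordering in this hunk matters because on CONFIG_HIGHPTE configurations the page table handed to do_anonymous_page() is mapped with kmap_atomic(), and check_stack_guard_page() can end up growing the stack (expand_stack() and anon_vma_prepare()), which may sleep; sleeping inside an atomic kmap is not allowed. So the pte_unmap() is hoisted to the top and done unconditionally. The side effect is that the zero-page path can no longer just take the PTE lock with pte_lockptr()/spin_lock() as before: the page table has to be remapped first. pte_offset_map_lock() does both in one step; as a rough sketch of the generic definition in include/linux/mm.h of that era (paraphrased, not verbatim):

#define pte_offset_map_lock(mm, pmd, address, ptlp)		\
({								\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);		\
	/* remap the PTE page (kmap_atomic under HIGHPTE) */	\
	pte_t *__pte = pte_offset_map(pmd, address);		\
	*(ptlp) = __ptl;					\
	spin_lock(__ptl);					\
	__pte;							\
})

The caller is then expected to drop both the mapping and the lock with pte_unmap_unlock(page_table, ptl), which is what the unlock: label in this function does.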