@@ -2496,7 +2496,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
  */
 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
 	spinlock_t *ptl;
 	struct page *page;
@@ -2572,9 +2572,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	inc_mm_counter(mm, anon_rss);
 	pte = mk_pte(page, vma->vm_page_prot);
-	if (write_access && reuse_swap_page(page)) {
+	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-		write_access = 0;
+		flags &= ~FAULT_FLAG_WRITE;
 	}
 	flush_icache_page(vma, page);
 	set_pte_at(mm, address, page_table, pte);
@@ -2587,7 +2587,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		try_to_free_swap(page);
 	unlock_page(page);
 
-	if (write_access) {
+	if (flags & FAULT_FLAG_WRITE) {
 		ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
 		if (ret & VM_FAULT_ERROR)
 			ret &= VM_FAULT_ERROR;
@@ -2616,7 +2616,7 @@ out_page:
  */
 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access)
+		unsigned int flags)
 {
 	struct page *page;
 	spinlock_t *ptl;
@@ -2776,7 +2776,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * due to the bad i386 page protection. But it's valid
 	 * for other architectures too.
 	 *
-	 * Note that if write_access is true, we either now have
+	 * Note that if FAULT_FLAG_WRITE is set, we either now have
 	 * an exclusive copy of the page, or this is a shared mapping,
 	 * so we can make it writable and dirty to avoid having to
 	 * handle that later.
@@ -2847,11 +2847,10 @@ unwritable_page:
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
 	pgoff_t pgoff = (((address & PAGE_MASK)
 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-	unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
 	pte_unmap(page_table);
 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
@@ -2868,12 +2867,12 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
-		int write_access, pte_t orig_pte)
+		unsigned int flags, pte_t orig_pte)
 {
-	unsigned int flags = FAULT_FLAG_NONLINEAR |
-				(write_access ? FAULT_FLAG_WRITE : 0);
 	pgoff_t pgoff;
 
+	flags |= FAULT_FLAG_NONLINEAR;
+
 	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
 		return 0;
 
@@ -2904,7 +2903,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long address,
-		pte_t *pte, pmd_t *pmd, int write_access)
+		pte_t *pte, pmd_t *pmd, unsigned int flags)
 {
 	pte_t entry;
 	spinlock_t *ptl;
@@ -2915,30 +2914,30 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 			if (vma->vm_ops) {
 				if (likely(vma->vm_ops->fault))
 					return do_linear_fault(mm, vma, address,
-						pte, pmd, write_access, entry);
+						pte, pmd, flags, entry);
 			}
 			return do_anonymous_page(mm, vma, address,
-						 pte, pmd, write_access);
+						 pte, pmd, flags);
 		}
 		if (pte_file(entry))
 			return do_nonlinear_fault(mm, vma, address,
-					pte, pmd, write_access, entry);
+					pte, pmd, flags, entry);
 		return do_swap_page(mm, vma, address,
-					pte, pmd, write_access, entry);
+					pte, pmd, flags, entry);
 	}
 
 	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
 	if (unlikely(!pte_same(*pte, entry)))
 		goto unlock;
-	if (write_access) {
+	if (flags & FAULT_FLAG_WRITE) {
 		if (!pte_write(entry))
 			return do_wp_page(mm, vma, address,
 					pte, pmd, ptl, entry);
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
+	if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
 		update_mmu_cache(vma, address, entry);
 	} else {
 		/*
@@ -2947,7 +2946,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 		 * This still avoids useless tlb flushes for .text page faults
 		 * with threads.
 		 */
-		if (write_access)
+		if (flags & FAULT_FLAG_WRITE)
 			flush_tlb_page(vma, address);
 	}
 unlock:
@@ -2965,13 +2964,14 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	unsigned int flags = write_access ? FAULT_FLAG_WRITE : 0;
 
 	__set_current_state(TASK_RUNNING);
 
 	count_vm_event(PGFAULT);
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
-		return hugetlb_fault(mm, vma, address, write_access);
+		return hugetlb_fault(mm, vma, address, flags);
 
 	pgd = pgd_offset(mm, address);
 	pud = pud_alloc(mm, pgd, address);
@@ -2984,7 +2984,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte)
 		return VM_FAULT_OOM;
 
-	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
 #ifndef __PAGETABLE_PUD_FOLDED
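
For readers following along, here is a minimal standalone sketch (not part of the patch) of the bool-to-bitmask idiom the hunks above apply: a single unsigned int carries the fault flags, callees test or clear individual bits instead of passing an int write_access around, and do_nonlinear_fault() can simply OR in FAULT_FLAG_NONLINEAR rather than rebuilding a flags word from a boolean. The demo_fault() helper and the flag values below are illustrative only; the real FAULT_FLAG_* definitions live in include/linux/mm.h.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdio.h>

#define FAULT_FLAG_WRITE	0x01	/* fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* fault was via a nonlinear mapping */

/* Mirrors how do_swap_page() now consumes its flags argument. */
static unsigned int demo_fault(unsigned int flags)
{
	if (flags & FAULT_FLAG_WRITE) {
		/* e.g. once reuse_swap_page() succeeds, the write bit is
		 * cleared so the later do_wp_page() path is skipped. */
		flags &= ~FAULT_FLAG_WRITE;
	}
	return flags;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_WRITE | FAULT_FLAG_NONLINEAR;

	printf("flags on entry: %#x, on return: %#x\n",
	       flags, demo_fault(flags));
	return 0;
}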