@@ -811,15 +811,18 @@ static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
         pte = *ptep;
         pte_unmap(ptep);
         if (pte_present(pte)) {
-                if (write && !pte_dirty(pte))
+                if (write && !pte_write(pte))
                         goto out;
                 if (read && !pte_read(pte))
                         goto out;
                 pfn = pte_pfn(pte);
                 if (pfn_valid(pfn)) {
                         page = pfn_to_page(pfn);
-                        if (accessed)
+                        if (accessed) {
+                                if (write && !pte_dirty(pte) && !PageDirty(page))
+                                        set_page_dirty(page);
                                 mark_page_accessed(page);
+                        }
                         return page;
                 }
         }
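
In __follow_page(), the write-permission test changes from pte_dirty() to pte_write(): a clean but writable pte is a perfectly valid target for a write, while a dirty pte that COW has since made read-only is not. Because get_user_pages() users write to the returned page through a separate kernel mapping, the hardware dirty bit may never be set on the user pte, so the dirtiness is transferred to the struct page here before mark_page_accessed().

The later hunks OR a new VM_FAULT_WRITE bit into the ordinary fault return codes. For orientation, a sketch of the assumed 2.6-era values (include/linux/mm.h); the constants are reproduced from contemporary sources rather than from these hunks, so treat the exact numbers as illustrative:

        #define VM_FAULT_OOM    0x00
        #define VM_FAULT_SIGBUS 0x01
        #define VM_FAULT_MINOR  0x02
        #define VM_FAULT_MAJOR  0x03
        #define VM_FAULT_WRITE  0x10    /* special case for get_user_pages */
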
@@ -941,10 +944,11 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                 }
                 spin_lock(&mm->page_table_lock);
                 do {
+                        int write_access = write;
                         struct page *page;

                         cond_resched_lock(&mm->page_table_lock);
-                        while (!(page = follow_page(mm, start, write))) {
+                        while (!(page = follow_page(mm, start, write_access))) {
                                 /*
                                  * Shortcut for anonymous pages. We don't want
                                  * to force the creation of pages tables for
@@ -957,7 +961,16 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                         break;
                                 }
                                 spin_unlock(&mm->page_table_lock);
-                                switch (handle_mm_fault(mm,vma,start,write)) {
+                                switch (__handle_mm_fault(mm, vma, start,
+                                                write_access)) {
+                                case VM_FAULT_WRITE:
+                                        /*
+                                         * do_wp_page has broken COW when
+                                         * necessary, even if maybe_mkwrite
+                                         * decided not to set pte_write
+                                         */
+                                        write_access = 0;
+                                        /* FALLTHRU */
                                 case VM_FAULT_MINOR:
                                         tsk->min_flt++;
                                         break;
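
These two hunks give get_user_pages() a per-iteration write_access that can be dropped once copy-on-write is out of the way. The case that matters is a write into a vma without VM_WRITE (ptrace poking a read-only text mapping, for instance): do_wp_page() breaks COW and resolves the fault, but maybe_mkwrite() deliberately leaves the pte read-only, so a follow_page() that keeps demanding pte_write() can never succeed and the loop would retry forever. The new VM_FAULT_WRITE return says that COW has been broken and subsequent lookups are safe as reads. A sketch of the 2.6-era helper the comment refers to (mm/memory.c, reproduced from memory, so illustrative rather than authoritative):

        static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
        {
                if (likely(vma->vm_flags & VM_WRITE))
                        pte = pte_mkwrite(pte);
                return pte;
        }

One caveat in the dispatch as written: do_wp_page() below returns VM_FAULT_MINOR|VM_FAULT_WRITE, which matches neither "case VM_FAULT_WRITE:" nor "case VM_FAULT_MINOR:", so a COW-breaking fault lands in the switch's default arm. Testing the bit before dispatching avoids that; a sketch of the shape mainline settled on in a follow-up fix, not the patch itself:

        ret = __handle_mm_fault(mm, vma, start, write_access);
        if (ret & VM_FAULT_WRITE)
                write_access = 0;       /* COW broken; retry lookups as reads */
        switch (ret & ~VM_FAULT_WRITE) {
        case VM_FAULT_MINOR:
                tsk->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                tsk->maj_flt++;
                break;
        /* SIGBUS and OOM handled as before */
        }
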
@@ -1220,6 +1233,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
         struct page *old_page, *new_page;
         unsigned long pfn = pte_pfn(pte);
         pte_t entry;
+        int ret;

         if (unlikely(!pfn_valid(pfn))) {
                 /*
@@ -1247,7 +1261,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
                         lazy_mmu_prot_update(entry);
                         pte_unmap(page_table);
                         spin_unlock(&mm->page_table_lock);
-                        return VM_FAULT_MINOR;
+                        return VM_FAULT_MINOR|VM_FAULT_WRITE;
                 }
         }
         pte_unmap(page_table);
@@ -1274,6 +1288,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
         /*
          * Re-check the pte - we dropped the lock
          */
+        ret = VM_FAULT_MINOR;
         spin_lock(&mm->page_table_lock);
         page_table = pte_offset_map(pmd, address);
         if (likely(pte_same(*page_table, pte))) {
@@ -1290,12 +1305,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,

                 /* Free the old page.. */
                 new_page = old_page;
+                ret |= VM_FAULT_WRITE;
         }
         pte_unmap(page_table);
         page_cache_release(new_page);
         page_cache_release(old_page);
         spin_unlock(&mm->page_table_lock);
-        return VM_FAULT_MINOR;
+        return ret;

 no_new_page:
         page_cache_release(old_page);
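
Taken together, the do_wp_page() hunks establish a return contract: VM_FAULT_WRITE is reported on both success paths, unconditionally on the in-place reuse path, and on the copy path only when the pte re-check under the lock succeeded. If another thread changed the pte while the lock was dropped, the function returns plain VM_FAULT_MINOR and the fault is simply replayed, so callers must not assume COW was broken in that case. A summary of the paths touched by this patch (my reading of the hunks, not kernel source):

        /*
         * do_wp_page() return values after this patch:
         *   VM_FAULT_MINOR                  pte changed under us; replay
         *                                   the fault, no COW claim made
         *   VM_FAULT_MINOR|VM_FAULT_WRITE   page reused or copied; COW is
         *                                   broken even if maybe_mkwrite()
         *                                   left the pte read-only
         */
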
@@ -1987,7 +2003,6 @@ static inline int handle_pte_fault(struct mm_struct *mm,
         if (write_access) {
                 if (!pte_write(entry))
                         return do_wp_page(mm, vma, address, pte, pmd, entry);
-
                 entry = pte_mkdirty(entry);
         }
         entry = pte_mkyoung(entry);
@@ -2002,7 +2017,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
+int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
                 unsigned long address, int write_access)
 {
         pgd_t *pgd;
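
The rename gives the raw, flag-carrying fault entry point a private name. Every caller other than get_user_pages() should keep seeing the old return values, so the natural companion change (presumably in include/linux/mm.h, not among these hunks) is to reintroduce handle_mm_fault() as a wrapper that masks the new bit out:

        static inline int handle_mm_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        int write_access)
        {
                return __handle_mm_fault(mm, vma, address, write_access) &
                                        (~VM_FAULT_WRITE);
        }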