@@ -3093,14 +3093,34 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t *page_table;
 	spinlock_t *ptl;
 	struct page *page;
+	struct page *cow_page;
 	pte_t entry;
 	int anon = 0;
-	int charged = 0;
 	struct page *dirty_page = NULL;
 	struct vm_fault vmf;
 	int ret;
 	int page_mkwrite = 0;
 
+	/*
+	 * If we do COW later, allocate page before taking lock_page()
+	 * on the file cache page. This will reduce lock holding time.
+	 */
+	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+
+		if (unlikely(anon_vma_prepare(vma)))
+			return VM_FAULT_OOM;
+
+		cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+		if (!cow_page)
+			return VM_FAULT_OOM;
+
+		if (mem_cgroup_newpage_charge(cow_page, mm, GFP_KERNEL)) {
+			page_cache_release(cow_page);
+			return VM_FAULT_OOM;
+		}
+	} else
+		cow_page = NULL;
+
 	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
 	vmf.pgoff = pgoff;
 	vmf.flags = flags;
@@ -3109,12 +3129,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	ret = vma->vm_ops->fault(vma, &vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
 			    VM_FAULT_RETRY)))
-		return ret;
+		goto uncharge_out;
 
 	if (unlikely(PageHWPoison(vmf.page))) {
 		if (ret & VM_FAULT_LOCKED)
 			unlock_page(vmf.page);
-		return VM_FAULT_HWPOISON;
+		ret = VM_FAULT_HWPOISON;
+		goto uncharge_out;
 	}
 
 	/*
@@ -3132,23 +3153,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	page = vmf.page;
 	if (flags & FAULT_FLAG_WRITE) {
 		if (!(vma->vm_flags & VM_SHARED)) {
+			page = cow_page;
 			anon = 1;
-			if (unlikely(anon_vma_prepare(vma))) {
-				ret = VM_FAULT_OOM;
-				goto out;
-			}
-			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
-						vma, address);
-			if (!page) {
-				ret = VM_FAULT_OOM;
-				goto out;
-			}
-			if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
-				ret = VM_FAULT_OOM;
-				page_cache_release(page);
-				goto out;
-			}
-			charged = 1;
 			copy_user_highpage(page, vmf.page, address, vma);
 			__SetPageUptodate(page);
 		} else {
@@ -3217,8 +3223,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, address, page_table);
 	} else {
-		if (charged)
-			mem_cgroup_uncharge_page(page);
+		if (cow_page)
+			mem_cgroup_uncharge_page(cow_page);
 		if (anon)
 			page_cache_release(page);
 		else
@@ -3227,7 +3233,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	pte_unmap_unlock(page_table, ptl);
 
-out:
 	if (dirty_page) {
 		struct address_space *mapping = page->mapping;
 
@@ -3257,6 +3262,13 @@ out:
 unwritable_page:
 	page_cache_release(page);
 	return ret;
+uncharge_out:
+	/* fs's fault handler returned an error */
+	if (cow_page) {
+		mem_cgroup_uncharge_page(cow_page);
+		page_cache_release(cow_page);
+	}
+	return ret;
 }
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,