@@ -1047,7 +1047,8 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* readonly mappings. The tradeoff is that copy_page_range is more
* efficient than faulting.
*/
- if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
+ if (!(vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
+ VM_PFNMAP | VM_MIXEDMAP))) {
if (!vma->anon_vma)
return 0;
}
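
For readability, the post-patch condition in copy_page_range() can be read as the hypothetical helper below. This is only a sketch restating the check above, not part of the patch; the name vma_needs_eager_copy is made up, and it assumes the flag and struct definitions from <linux/mm.h>.

static inline bool vma_needs_eager_copy(struct vm_area_struct *vma)
{
	/* "Special" mappings cannot be rebuilt by the child's fault path. */
	if (vma->vm_flags & (VM_HUGETLB | VM_NONLINEAR |
			     VM_PFNMAP | VM_MIXEDMAP))
		return true;
	/* Anonymous pages have no backing file, so the child needs the PTE copies. */
	return vma->anon_vma != NULL;
}
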
@@ -2085,6 +2086,11 @@ out:
* ask for a shared writable mapping!
*
* The page does not need to be reserved.
+ *
+ * Usually this function is called from f_op->mmap() handler
+ * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
+ * Caller must set VM_MIXEDMAP on vma if it wants to call this
+ * function from other places, for example from page-fault handler.
*/
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page)
@@ -2093,7 +2099,11 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
return -EFAULT;
if (!page_count(page))
return -EINVAL;
- vma->vm_flags |= VM_INSERTPAGE;
+ if (!(vma->vm_flags & VM_MIXEDMAP)) {
+ BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
+ BUG_ON(vma->vm_flags & VM_PFNMAP);
+ vma->vm_flags |= VM_MIXEDMAP;
+ }
return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
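
The new kernel-doc comment above spells out the calling convention that the VM_MIXEDMAP test enforces. Below is a minimal, hypothetical driver sketch illustrating it, assuming the 3.x-era APIs visible in this patch (mm->mmap_sem, the int-returning .fault handler and vmf->virtual_address). All mydrv_* names and the mydrv_buf layout are made up, and range/error handling is kept to a minimum.

#include <linux/fs.h>
#include <linux/mm.h>

struct mydrv_buf {
	struct page *pages[16];		/* pre-allocated, refcounted pages */
};

static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct mydrv_buf *buf = vma->vm_private_data;
	struct page *page = buf->pages[vmf->pgoff % 16];

	/*
	 * mmap_sem is only held for reading here, so vm_flags must not be
	 * touched; vm_insert_page() is safe because VM_MIXEDMAP was already
	 * set in mydrv_mmap() under the write-lock.
	 */
	if (vm_insert_page(vma, (unsigned long)vmf->virtual_address, page))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct mydrv_vm_ops = {
	.fault	= mydrv_fault,
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv_buf *buf = file->private_data;

	/* ->mmap() runs under mm->mmap_sem write-lock, so this is allowed. */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &mydrv_vm_ops;
	vma->vm_private_data = buf;

	/* Optionally prefault the first page while still inside ->mmap(). */
	return vm_insert_page(vma, vma->vm_start, buf->pages[0]);
}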