@@ -2322,13 +2322,16 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	struct vm_area_struct *new_vma, *prev;
 	struct rb_node **rb_link, *rb_parent;
 	struct mempolicy *pol;
+	bool faulted_in_anon_vma = true;
 
 	/*
 	 * If anonymous vma has not yet been faulted, update new pgoff
 	 * to match new location, to increase its chance of merging.
 	 */
-	if (!vma->vm_file && !vma->anon_vma)
+	if (unlikely(!vma->vm_file && !vma->anon_vma)) {
 		pgoff = addr >> PAGE_SHIFT;
+		faulted_in_anon_vma = false;
+	}
 
 	find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
 	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
@@ -2337,9 +2340,24 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 		/*
 		 * Source vma may have been merged into new_vma
 		 */
-		if (vma_start >= new_vma->vm_start &&
-		    vma_start < new_vma->vm_end)
+		if (unlikely(vma_start >= new_vma->vm_start &&
+			     vma_start < new_vma->vm_end)) {
+			/*
+			 * The only way we can get a vma_merge with
+			 * self during an mremap is if the vma hasn't
+			 * been faulted in yet and we were allowed to
+			 * reset the dst vma->vm_pgoff to the
+			 * destination address of the mremap to allow
+			 * the merge to happen. mremap must change the
+			 * vm_pgoff linearity between src and dst vmas
+			 * (in turn preventing a vma_merge) to be
+			 * safe. It is only safe to keep the vm_pgoff
+			 * linear if there are no pages mapped yet.
+			 */
+			VM_BUG_ON(faulted_in_anon_vma);
 			*vmap = new_vma;
+		} else
+			anon_vma_moveto_tail(new_vma);
 	} else {
 		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (new_vma) {