@@ -2237,6 +2237,12 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		+ (vma->vm_pgoff >> PAGE_SHIFT);
 	mapping = (struct address_space *)page_private(page);
 
+	/*
+	 * Take the mapping lock for the duration of the table walk. As
+	 * this mapping should be shared between all the VMAs,
+	 * __unmap_hugepage_range() is called as the lock is already held
+	 */
+	spin_lock(&mapping->i_mmap_lock);
 	vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
 		/* Do not unmap the current VMA */
 		if (iter_vma == vma)
@@ -2250,10 +2256,11 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * from the time of fork. This would look like data corruption
 		 */
 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
-			unmap_hugepage_range(iter_vma,
+			__unmap_hugepage_range(iter_vma,
 				address, address + huge_page_size(h),
 				page);
 	}
+	spin_unlock(&mapping->i_mmap_lock);
 
 	return 1;
 }
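
A note on the substitution above (commentary, not part of the patch): unmap_hugepage_range() in kernels of this era is a thin wrapper that acquires i_mmap_lock itself before delegating to __unmap_hugepage_range(), which expects the caller to already hold the lock. Calling the wrapper from inside the prio-tree walk, where the loop now holds the spinlock, would re-acquire a spinlock already held and deadlock; hence the switch to the double-underscore variant. Below is a minimal sketch of that convention; the wrapper body is reconstructed from memory of the contemporary mm/hugetlb.c, not copied verbatim from upstream.

/*
 * Sketch only: illustrates the lock-taking wrapper vs. lock-held worker
 * convention this patch depends on. The body is an assumption about the
 * mm/hugetlb.c of this era, not verbatim upstream code.
 */
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	struct address_space *mapping = vma->vm_file->f_mapping;

	spin_lock(&mapping->i_mmap_lock);	/* safe: lock not yet held */
	__unmap_hugepage_range(vma, start, end, ref_page);
	spin_unlock(&mapping->i_mmap_lock);
}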