@@ -2293,6 +2293,9 @@ retry_avoidcopy:
 		outside_reserve = 1;
 
 	page_cache_get(old_page);
+
+	/* Drop page_table_lock as buddy allocator may be called */
+	spin_unlock(&mm->page_table_lock);
 	new_page = alloc_huge_page(vma, address, outside_reserve);
 
 	if (IS_ERR(new_page)) {
@@ -2310,19 +2313,25 @@ retry_avoidcopy:
 			if (unmap_ref_private(mm, vma, old_page, address)) {
 				BUG_ON(page_count(old_page) != 1);
 				BUG_ON(huge_pte_none(pte));
+				spin_lock(&mm->page_table_lock);
 				goto retry_avoidcopy;
 			}
 			WARN_ON_ONCE(1);
 		}
 
+		/* Caller expects lock to be held */
+		spin_lock(&mm->page_table_lock);
 		return -PTR_ERR(new_page);
 	}
 
-	spin_unlock(&mm->page_table_lock);
 	copy_huge_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
-	spin_lock(&mm->page_table_lock);
 
+	/*
+	 * Retake the page_table_lock to check for racing updates
+	 * before the page tables are altered
+	 */
+	spin_lock(&mm->page_table_lock);
 	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
 	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
 		/* Break COW */