@@ -1771,12 +1771,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 #ifndef CONFIG_NUMA
+	up_read(&mm->mmap_sem);
 	VM_BUG_ON(!*hpage);
 	new_page = *hpage;
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-		up_read(&mm->mmap_sem);
-		return;
-	}
 #else
 	VM_BUG_ON(*hpage);
 	/*
@@ -1791,22 +1788,26 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 */
 	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
 				      node, __GFP_OTHER_NODE);
+
+	/*
+	 * After allocating the hugepage, release the mmap_sem read lock in
+	 * preparation for taking it in write mode.
+	 */
+	up_read(&mm->mmap_sem);
 	if (unlikely(!new_page)) {
-		up_read(&mm->mmap_sem);
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
+#endif
+
 	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-		up_read(&mm->mmap_sem);
+#ifdef CONFIG_NUMA
 		put_page(new_page);
+#endif
 		return;
 	}
-#endif
-
-	/* after allocating the hugepage upgrade to mmap_sem write mode */
-	up_read(&mm->mmap_sem);
 
 	/*
 	 * Prevent all access to pagetables with the exception of
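
For context on why this reshuffling is safe: mm->mmap_sem is an ordinary rw_semaphore, and there is no primitive to upgrade it from read to write in place. The only option is to drop the read lock, take the semaphore again in write mode, and then revalidate everything that was learned under the read lock (collapse_huge_page does exactly that after down_write(), re-finding and re-checking the vma; that code is outside these hunks). The following is a minimal userspace sketch of the same drop-and-retake pattern, using POSIX rwlocks as a stand-in for the rw_semaphore; the names generation, seen and collapse_like_path are illustrative inventions, not kernel identifiers.

#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int generation;	/* bumped by every writer */

static void collapse_like_path(void)
{
	void *page;
	int seen;

	pthread_rwlock_rdlock(&lock);
	seen = generation;	/* state observed under the read lock */
	page = malloc(4096);	/* allocate while the observed state is
				 * still valid, like the hugepage above */
	pthread_rwlock_unlock(&lock);	/* the up_read() step */

	if (!page)
		return;

	pthread_rwlock_wrlock(&lock);	/* retake in write mode */
	if (seen != generation) {
		/* a writer slipped in between: bail out and retry later */
		pthread_rwlock_unlock(&lock);
		free(page);
		return;
	}
	generation++;	/* ... the real work goes here ... */
	pthread_rwlock_unlock(&lock);
}

int main(void)
{
	collapse_like_path();
	return 0;
}

Note that what the patch itself changes is only where the up_read() happens: hoisting it next to the allocation means every path below it runs with the read lock already dropped, so the error paths (allocation failure, failed memcg charge) no longer each need their own up_read() call.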