@@ -1596,14 +1596,13 @@ void __khugepaged_exit(struct mm_struct *mm)
 		list_del(&mm_slot->mm_node);
 		free = 1;
 	}
+	spin_unlock(&khugepaged_mm_lock);
 
 	if (free) {
-		spin_unlock(&khugepaged_mm_lock);
 		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
 		free_mm_slot(mm_slot);
 		mmdrop(mm);
 	} else if (mm_slot) {
-		spin_unlock(&khugepaged_mm_lock);
 		/*
 		 * This is required to serialize against
 		 * khugepaged_test_exit() (which is guaranteed to run
@@ -1614,8 +1613,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 		 */
 		down_write(&mm->mmap_sem);
 		up_write(&mm->mmap_sem);
-	} else
-		spin_unlock(&khugepaged_mm_lock);
+	}
 }
 
 static void release_pte_page(struct page *page)