@@ -286,6 +286,12 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
 
 	spin_lock(&hugetlb_lock);
 	if (page) {
+		/*
+		 * This page is now managed by the hugetlb allocator and has
+		 * no users -- drop the buddy allocator's reference.
+		 */
+		put_page_testzero(page);
+		VM_BUG_ON(page_count(page));
 		nid = page_to_nid(page);
 		set_compound_page_dtor(page, free_huge_page);
 		/*
@@ -369,13 +375,14 @@ free:
 			enqueue_huge_page(page);
 		else {
 			/*
-			 * Decrement the refcount and free the page using its
-			 * destructor. This must be done with hugetlb_lock
+			 * The page has a reference count of zero already, so
+			 * call free_huge_page directly instead of using
+			 * put_page. This must be done with hugetlb_lock
 			 * unlocked which is safe because free_huge_page takes
 			 * hugetlb_lock before deciding how to free the page.
 			 */
 			spin_unlock(&hugetlb_lock);
-			put_page(page);
+			free_huge_page(page);
 			spin_lock(&hugetlb_lock);
 		}
 	}
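
For clarity, the refcount handoff the two hunks establish, sketched as plain C
(an illustrative summary of the change, not code lifted from the patch; the
hugetlb accounting around these calls is elided):

	/* alloc_buddy_huge_page(): the fresh page arrives holding the buddy
	 * allocator's reference (count == 1). hugetlb takes ownership of the
	 * page, so drop that reference without freeing it. */
	put_page_testzero(page);	/* count: 1 -> 0 */
	VM_BUG_ON(page_count(page));	/* hugetlb pool pages sit at count 0 */

	/* surplus free path: the count is already 0, so put_page() would
	 * underflow it. Call the compound destructor directly instead. */
	spin_unlock(&hugetlb_lock);
	free_huge_page(page);		/* takes hugetlb_lock itself */
	spin_lock(&hugetlb_lock);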