@@ -372,11 +372,19 @@ retry:
 	resv_huge_pages += delta;
 	ret = 0;
 free:
+	/* Free the needed pages to the hugetlb pool */
 	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+		if ((--needed) < 0)
+			break;
 		list_del(&page->lru);
-		if ((--needed) >= 0)
-			enqueue_huge_page(page);
-		else {
+		enqueue_huge_page(page);
+	}
+
+	/* Free unnecessary surplus pages to the buddy allocator */
+	if (!list_empty(&surplus_list)) {
+		spin_unlock(&hugetlb_lock);
+		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+			list_del(&page->lru);
 			/*
 			 * The page has a reference count of zero already, so
 			 * call free_huge_page directly instead of using
@@ -384,10 +392,9 @@ free:
 			 * unlocked which is safe because free_huge_page takes
 			 * hugetlb_lock before deciding how to free the page.
 			 */
-			spin_unlock(&hugetlb_lock);
 			free_huge_page(page);
-			spin_lock(&hugetlb_lock);
 		}
+		spin_lock(&hugetlb_lock);
 	}
 
 	return ret;
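
For reference, a sketch of how the tail of gather_surplus_pages() reads once both hunks apply, reconstructed from the diff above (the surrounding declarations of page, tmp, needed, delta, and surplus_list are assumed from context, and the comment line elided between the two hunks is omitted here too). The point of the restructuring is visible at a glance: hugetlb_lock is dropped and re-acquired once around the whole surplus batch instead of being cycled for every freed page.

	resv_huge_pages += delta;
	ret = 0;
free:
	/* First pass, under hugetlb_lock: move the needed pages into the pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		list_del(&page->lru);
		enqueue_huge_page(page);
	}

	/* Second pass: return any leftover surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		spin_unlock(&hugetlb_lock);	/* dropped once for the whole batch */
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			list_del(&page->lru);
			/*
			 * Refcount is already zero, and free_huge_page takes
			 * hugetlb_lock itself, so it must be called unlocked.
			 */
			free_huge_page(page);
		}
		spin_lock(&hugetlb_lock);	/* re-acquired once */
	}

	return ret;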