@@ -211,24 +211,29 @@ static void put_huge_zero_page(void)
 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
 }
 
-static int shrink_huge_zero_page(struct shrinker *shrink,
-		struct shrink_control *sc)
+static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
+					struct shrink_control *sc)
 {
-	if (!sc->nr_to_scan)
-		/* we can free zero page only if last reference remains */
-		return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+	/* we can free zero page only if last reference remains */
+	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+}
 
+static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
+					struct shrink_control *sc)
+{
 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
 		struct page *zero_page = xchg(&huge_zero_page, NULL);
 		BUG_ON(zero_page == NULL);
 		__free_page(zero_page);
+		return HPAGE_PMD_NR;
 	}
 
 	return 0;
 }
 
 static struct shrinker huge_zero_page_shrinker = {
-	.shrink = shrink_huge_zero_page,
+	.count_objects = shrink_huge_zero_page_count,
+	.scan_objects = shrink_huge_zero_page_scan,
 	.seeks = DEFAULT_SEEKS,
 };
 
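For context, the count/scan split this hunk adopts works as follows: ->count_objects reports how many objects could be freed right now and must not free anything, while ->scan_objects attempts to free up to sc->nr_to_scan objects and returns the number actually freed, or SHRINK_STOP when it cannot make progress. Below is a minimal sketch of a shrinker using this API against a hypothetical counted pool; the demo_* names are invented for illustration, while the callback signatures, SHRINK_STOP and DEFAULT_SEEKS come from <linux/shrinker.h> as of this series:

#include <linux/atomic.h>
#include <linux/shrinker.h>

static atomic_long_t demo_pool_size = ATOMIC_LONG_INIT(0);	/* hypothetical pool */

static unsigned long demo_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	/* Report only; counting must have no side effects. */
	return atomic_long_read(&demo_pool_size);
}

static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* Free up to sc->nr_to_scan objects from the pool. */
	while (freed < sc->nr_to_scan &&
	       atomic_long_add_unless(&demo_pool_size, -1, 0))
		freed++;	/* a real demo_free_one() would go here */

	/* Tell reclaim to back off when nothing could be freed. */
	return freed ? freed : SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

In kernels of this vintage the shrinker is wired into reclaim with register_shrinker(&demo_shrinker) at init time and removed with unregister_shrinker(&demo_shrinker) at teardown.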