@@ -202,6 +202,14 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+static inline int do_shrinker_shrink(struct shrinker *shrinker,
+				     struct shrink_control *sc,
+				     unsigned long nr_to_scan)
+{
+	sc->nr_to_scan = nr_to_scan;
+	return (*shrinker->shrink)(shrinker, sc);
+}
+
 #define SHRINK_BATCH 128
 /*
  * Call the shrink functions to age shrinkable caches
@@ -223,15 +231,14 @@ EXPORT_SYMBOL(unregister_shrinker);
  * Returns the number of slab objects which we shrunk.
  */
 unsigned long shrink_slab(struct shrink_control *shrink,
+			  unsigned long nr_pages_scanned,
 			  unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
 	unsigned long ret = 0;
-	unsigned long scanned = shrink->nr_scanned;
-	gfp_t gfp_mask = shrink->gfp_mask;
 
-	if (scanned == 0)
-		scanned = SWAP_CLUSTER_MAX;
+	if (nr_pages_scanned == 0)
+		nr_pages_scanned = SWAP_CLUSTER_MAX;
 
 	if (!down_read_trylock(&shrinker_rwsem)) {
 		/* Assume we'll be able to shrink next time */
@@ -244,8 +251,8 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 		unsigned long total_scan;
 		unsigned long max_pass;
 
-		max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-		delta = (4 * scanned) / shrinker->seeks;
+		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+		delta = (4 * nr_pages_scanned) / shrinker->seeks;
 		delta *= max_pass;
 		do_div(delta, lru_pages + 1);
 		shrinker->nr += delta;
@@ -272,9 +279,9 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 			int shrink_ret;
 			int nr_before;
 
-			nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-			shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
-							 gfp_mask);
+			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
+			shrink_ret = do_shrinker_shrink(shrinker, shrink,
+							this_scan);
 			if (shrink_ret == -1)
 				break;
 			if (shrink_ret < nr_before)
@@ -2072,8 +2079,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 			lru_pages += zone_reclaimable_pages(zone);
 		}
 
-		shrink->nr_scanned = sc->nr_scanned;
-		shrink_slab(shrink, lru_pages);
+		shrink_slab(shrink, sc->nr_scanned, lru_pages);
 		if (reclaim_state) {
 			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
 			reclaim_state->reclaimed_slab = 0;
@@ -2456,8 +2462,7 @@ loop_again:
 						end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
-			shrink.nr_scanned = sc.nr_scanned;
-			nr_slab = shrink_slab(&shrink, lru_pages);
+			nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 
@@ -3025,7 +3030,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	}
 
 	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
-	shrink.nr_scanned = sc.nr_scanned;
 	if (nr_slab_pages0 > zone->min_slab_pages) {
 		/*
 		 * shrink_slab() does not currently allow us to determine how
@@ -3041,7 +3045,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		unsigned long lru_pages = zone_reclaimable_pages(zone);
 
 		/* No reclaimable slab or very low memory pressure */
-		if (!shrink_slab(&shrink, lru_pages))
+		if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
 			break;
 
 		/* Freed enough memory */
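
After this change a shrinker no longer receives nr_to_scan and gfp_mask as
separate arguments: both travel in the shrink_control struct, and
do_shrinker_shrink() stashes nr_to_scan into it before every callback, so a
value of 0 keeps its old meaning of "just report the object count". For
reference, a shrinker written against the new prototype would look roughly
like the sketch below; my_cache_evict() and my_cache_count() are hypothetical
helpers standing in for a real cache, not anything from this patch:

	/* Sketch only: a shrinker against the post-patch callback API. */
	static int my_cache_shrink(struct shrinker *s,
				   struct shrink_control *sc)
	{
		if (sc->nr_to_scan) {
			/* Defer if the allocation context forbids FS reclaim;
			 * shrink_slab() treats -1 as "stop scanning me". */
			if (!(sc->gfp_mask & __GFP_FS))
				return -1;
			my_cache_evict(sc->nr_to_scan);	/* hypothetical */
		}
		/* nr_to_scan == 0 is a query: report remaining objects. */
		return my_cache_count();		/* hypothetical */
	}

	static struct shrinker my_cache_shrinker = {
		.shrink	= my_cache_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

Registration is unchanged by this patch: register_shrinker(&my_cache_shrinker)
at init time, unregister_shrinker() on teardown.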