@@ -931,13 +931,15 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
 	struct ext4_inode_info *ei;
 	struct list_head *cur, *tmp;
 	LIST_HEAD(skipped);
-	int ret, nr_shrunk = 0;
+	int nr_shrunk = 0;
 	int retried = 0, skip_precached = 1, nr_skipped = 0;
 
 	spin_lock(&sbi->s_es_lru_lock);
 
 retry:
 	list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
+		int shrunk;
+
 		/*
 		 * If we have already reclaimed all extents from extent
 		 * status tree, just stop the loop immediately.
@@ -964,13 +966,13 @@ retry:
 			continue;
 
 		write_lock(&ei->i_es_lock);
-		ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
+		shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
 		if (ei->i_es_lru_nr == 0)
 			list_del_init(&ei->i_es_lru);
 		write_unlock(&ei->i_es_lock);
 
-		nr_shrunk += ret;
-		nr_to_scan -= ret;
+		nr_shrunk += shrunk;
+		nr_to_scan -= shrunk;
 		if (nr_to_scan == 0)
 			break;
 	}
@@ -1007,7 +1009,20 @@ retry:
 	return nr_shrunk;
 }
 
-static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long ext4_es_count(struct shrinker *shrink,
+				   struct shrink_control *sc)
+{
+	unsigned long nr;
+	struct ext4_sb_info *sbi;
+
+	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
+	nr = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
+	trace_ext4_es_shrink_enter(sbi->s_sb, sc->nr_to_scan, nr);
+	return nr;
+}
+
+static unsigned long ext4_es_scan(struct shrinker *shrink,
+				  struct shrink_control *sc)
 {
 	struct ext4_sb_info *sbi = container_of(shrink,
 					struct ext4_sb_info, s_es_shrinker);
@@ -1022,9 +1037,8 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
 
 	nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL);
 
-	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
 	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
-	return ret;
+	return nr_shrunk;
 }
 
 void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
@@ -1032,7 +1046,8 @@ void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
 	INIT_LIST_HEAD(&sbi->s_es_lru);
 	spin_lock_init(&sbi->s_es_lru_lock);
 	sbi->s_es_last_sorted = 0;
-	sbi->s_es_shrinker.shrink = ext4_es_shrink;
+	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
+	sbi->s_es_shrinker.count_objects = ext4_es_count;
 	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
 	register_shrinker(&sbi->s_es_shrinker);
 }
@@ -1076,7 +1091,7 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
 	struct ext4_es_tree *tree = &ei->i_es_tree;
 	struct rb_node *node;
 	struct extent_status *es;
-	int nr_shrunk = 0;
+	unsigned long nr_shrunk = 0;
 	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 
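
For reference, the count_objects/scan_objects split used by this patch follows the generic shape sketched below. This is a minimal, hypothetical example (the demo_* names and the list-backed cache are illustrative, not part of ext4 or of this patch), assuming the two-callback shrinker API registered via register_shrinker() as in the hunk above: count_objects() only reports how many objects are reclaimable, and scan_objects() does the actual freeing and returns the number freed.

/* Hypothetical demo shrinker using the count_objects/scan_objects API. */
#include <linux/module.h>
#include <linux/shrinker.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_obj {
	struct list_head lru;
};

static LIST_HEAD(demo_lru);
static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_nr_objects;

/* count_objects(): report how many objects could be freed; no reclaim here. */
static unsigned long demo_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	return demo_nr_objects;
}

/* scan_objects(): free up to sc->nr_to_scan objects, return the number freed. */
static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	unsigned long freed = 0;

	spin_lock(&demo_lock);
	while (freed < sc->nr_to_scan && !list_empty(&demo_lru)) {
		struct demo_obj *obj = list_first_entry(&demo_lru,
							struct demo_obj, lru);

		list_del(&obj->lru);
		kfree(obj);
		demo_nr_objects--;
		freed++;
	}
	spin_unlock(&demo_lock);

	return freed;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	/* Registration mirrors ext4_es_register_shrinker() above. */
	register_shrinker(&demo_shrinker);
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Compared with the old single .shrink callback, which overloaded one function for both counting (nr_to_scan == 0) and reclaiming, the split lets the VM query the cache size cheaply and invoke the reclaim path only once it has decided how much to scan, which is the same change ext4_es_count()/ext4_es_scan() make above.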