@@ -176,22 +176,12 @@ static int
 mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(free_list);
-	struct list_head *l, *ltmp;
+	struct mb_cache *cache;
+	struct mb_cache_entry *entry, *tmp;
 	int count = 0;
 
-	spin_lock(&mb_cache_spinlock);
-	list_for_each(l, &mb_cache_list) {
-		struct mb_cache *cache =
-			list_entry(l, struct mb_cache, c_cache_list);
-		mb_debug("cache %s (%d)", cache->c_name,
-			 atomic_read(&cache->c_entry_count));
-		count += atomic_read(&cache->c_entry_count);
-	}
 	mb_debug("trying to free %d entries", nr_to_scan);
-	if (nr_to_scan == 0) {
-		spin_unlock(&mb_cache_spinlock);
-		goto out;
-	}
+	spin_lock(&mb_cache_spinlock);
 	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
 		struct mb_cache_entry *ce =
 			list_entry(mb_cache_lru_list.next,
@@ -199,12 +189,15 @@ mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 		list_move_tail(&ce->e_lru_list, &free_list);
 		__mb_cache_entry_unhash(ce);
 	}
+	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
+		mb_debug("cache %s (%d)", cache->c_name,
+			 atomic_read(&cache->c_entry_count));
+		count += atomic_read(&cache->c_entry_count);
+	}
 	spin_unlock(&mb_cache_spinlock);
-	list_for_each_safe(l, ltmp, &free_list) {
-		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
-						   e_lru_list), gfp_mask);
+	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+		__mb_cache_entry_forget(entry, gfp_mask);
 	}
-out:
 	return (count / 100) * sysctl_vfs_cache_pressure;
 }
 