|
@@ -356,7 +356,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * dentry_lru_(add|del|prune|move_tail) must be called with d_lock held.
|
|
|
+ * dentry_lru_(add|del|move_list) must be called with d_lock held.
|
|
|
*/
|
|
|
static void dentry_lru_add(struct dentry *dentry)
|
|
|
{
|
|
@@ -373,16 +373,26 @@ static void dentry_lru_add(struct dentry *dentry)
|
|
|
static void __dentry_lru_del(struct dentry *dentry)
|
|
|
{
|
|
|
list_del_init(&dentry->d_lru);
|
|
|
- dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
|
|
|
+ dentry->d_flags &= ~DCACHE_LRU_LIST;
|
|
|
dentry->d_sb->s_nr_dentry_unused--;
|
|
|
this_cpu_dec(nr_dentry_unused);
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
* Remove a dentry with references from the LRU.
|
|
|
+ *
|
|
|
+ * If we are on the shrink list, then we can get to try_prune_one_dentry() and
|
|
|
+ * lose our last reference through the parent walk. In this case, we need to
|
|
|
+ * remove ourselves from the shrink list, not the LRU.
|
|
|
*/
|
|
|
static void dentry_lru_del(struct dentry *dentry)
|
|
|
{
|
|
|
+ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
|
|
|
+ list_del_init(&dentry->d_lru);
|
|
|
+ dentry->d_flags &= ~DCACHE_SHRINK_LIST;
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
if (!list_empty(&dentry->d_lru)) {
|
|
|
spin_lock(&dentry->d_sb->s_dentry_lru_lock);
|
|
|
__dentry_lru_del(dentry);
|
|
@@ -392,14 +402,16 @@ static void dentry_lru_del(struct dentry *dentry)
|
|
|
|
|
|
static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
|
|
|
{
|
|
|
+ BUG_ON(dentry->d_flags & DCACHE_SHRINK_LIST);
|
|
|
+
|
|
|
spin_lock(&dentry->d_sb->s_dentry_lru_lock);
|
|
|
if (list_empty(&dentry->d_lru)) {
|
|
|
dentry->d_flags |= DCACHE_LRU_LIST;
|
|
|
list_add_tail(&dentry->d_lru, list);
|
|
|
- dentry->d_sb->s_nr_dentry_unused++;
|
|
|
- this_cpu_inc(nr_dentry_unused);
|
|
|
} else {
|
|
|
list_move_tail(&dentry->d_lru, list);
|
|
|
+ dentry->d_sb->s_nr_dentry_unused--;
|
|
|
+ this_cpu_dec(nr_dentry_unused);
|
|
|
}
|
|
|
spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
|
|
|
}
|
|
@@ -497,7 +509,8 @@ EXPORT_SYMBOL(d_drop);
|
|
|
* If ref is non-zero, then decrement the refcount too.
|
|
|
* Returns dentry requiring refcount drop, or NULL if we're done.
|
|
|
*/
|
|
|
-static inline struct dentry *dentry_kill(struct dentry *dentry)
|
|
|
+static inline struct dentry *
|
|
|
+dentry_kill(struct dentry *dentry, int unlock_on_failure)
|
|
|
__releases(dentry->d_lock)
|
|
|
{
|
|
|
struct inode *inode;
|
|
@@ -506,8 +519,10 @@ static inline struct dentry *dentry_kill(struct dentry *dentry)
|
|
|
inode = dentry->d_inode;
|
|
|
if (inode && !spin_trylock(&inode->i_lock)) {
|
|
|
relock:
|
|
|
- spin_unlock(&dentry->d_lock);
|
|
|
- cpu_relax();
|
|
|
+ if (unlock_on_failure) {
|
|
|
+ spin_unlock(&dentry->d_lock);
|
|
|
+ cpu_relax();
|
|
|
+ }
|
|
|
return dentry; /* try again with same dentry */
|
|
|
}
|
|
|
if (IS_ROOT(dentry))
|
|
@@ -590,7 +605,7 @@ repeat:
|
|
|
return;
|
|
|
|
|
|
kill_it:
|
|
|
- dentry = dentry_kill(dentry);
|
|
|
+ dentry = dentry_kill(dentry, 1);
|
|
|
if (dentry)
|
|
|
goto repeat;
|
|
|
}
|
|
@@ -810,12 +825,12 @@ EXPORT_SYMBOL(d_prune_aliases);
|
|
|
*
|
|
|
* This may fail if locks cannot be acquired no problem, just try again.
|
|
|
*/
|
|
|
-static void try_prune_one_dentry(struct dentry *dentry)
|
|
|
+static struct dentry * try_prune_one_dentry(struct dentry *dentry)
|
|
|
__releases(dentry->d_lock)
|
|
|
{
|
|
|
struct dentry *parent;
|
|
|
|
|
|
- parent = dentry_kill(dentry);
|
|
|
+ parent = dentry_kill(dentry, 0);
|
|
|
/*
|
|
|
* If dentry_kill returns NULL, we have nothing more to do.
|
|
|
* if it returns the same dentry, trylocks failed. In either
|
|
@@ -827,17 +842,18 @@ static void try_prune_one_dentry(struct dentry *dentry)
|
|
|
* fragmentation.
|
|
|
*/
|
|
|
if (!parent)
|
|
|
- return;
|
|
|
+ return NULL;
|
|
|
if (parent == dentry)
|
|
|
- return;
|
|
|
+ return dentry;
|
|
|
|
|
|
/* Prune ancestors. */
|
|
|
dentry = parent;
|
|
|
while (dentry) {
|
|
|
if (lockref_put_or_lock(&dentry->d_lockref))
|
|
|
- return;
|
|
|
- dentry = dentry_kill(dentry);
|
|
|
+ return NULL;
|
|
|
+ dentry = dentry_kill(dentry, 1);
|
|
|
}
|
|
|
+ return NULL;
|
|
|
}
|
|
|
|
|
|
static void shrink_dentry_list(struct list_head *list)
|
|
@@ -855,22 +871,32 @@ static void shrink_dentry_list(struct list_head *list)
|
|
|
continue;
|
|
|
}
|
|
|
|
|
|
+ /*
|
|
|
+ * The dispose list is isolated and dentries are not accounted
|
|
|
+ * to the LRU here, so we can simply remove it from the list
|
|
|
+ * here regardless of whether it is referenced or not.
|
|
|
+ */
|
|
|
+ list_del_init(&dentry->d_lru);
|
|
|
+ dentry->d_flags &= ~DCACHE_SHRINK_LIST;
|
|
|
+
|
|
|
/*
|
|
|
* We found an inuse dentry which was not removed from
|
|
|
- * the LRU because of laziness during lookup. Do not free
|
|
|
- * it - just keep it off the LRU list.
|
|
|
+ * the LRU because of laziness during lookup. Do not free it.
|
|
|
*/
|
|
|
if (dentry->d_lockref.count) {
|
|
|
- dentry_lru_del(dentry);
|
|
|
spin_unlock(&dentry->d_lock);
|
|
|
continue;
|
|
|
}
|
|
|
-
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
- try_prune_one_dentry(dentry);
|
|
|
+ dentry = try_prune_one_dentry(dentry);
|
|
|
|
|
|
rcu_read_lock();
|
|
|
+ if (dentry) {
|
|
|
+ dentry->d_flags |= DCACHE_SHRINK_LIST;
|
|
|
+ list_add(&dentry->d_lru, list);
|
|
|
+ spin_unlock(&dentry->d_lock);
|
|
|
+ }
|
|
|
}
|
|
|
rcu_read_unlock();
|
|
|
}
|
|
@@ -911,8 +937,10 @@ relock:
|
|
|
list_move(&dentry->d_lru, &referenced);
|
|
|
spin_unlock(&dentry->d_lock);
|
|
|
} else {
|
|
|
- list_move_tail(&dentry->d_lru, &tmp);
|
|
|
+ list_move(&dentry->d_lru, &tmp);
|
|
|
dentry->d_flags |= DCACHE_SHRINK_LIST;
|
|
|
+ this_cpu_dec(nr_dentry_unused);
|
|
|
+ sb->s_nr_dentry_unused--;
|
|
|
spin_unlock(&dentry->d_lock);
|
|
|
if (!--count)
|
|
|
break;
|
|
@@ -926,6 +954,27 @@ relock:
|
|
|
shrink_dentry_list(&tmp);
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Mark all the dentries as being on the dispose list so we don't think they are
|
|
|
+ * still on the LRU if we try to kill them from ascending the parent chain in
|
|
|
+ * try_prune_one_dentry() rather than directly from the dispose list.
|
|
|
+ */
|
|
|
+static void
|
|
|
+shrink_dcache_list(
|
|
|
+ struct list_head *dispose)
|
|
|
+{
|
|
|
+ struct dentry *dentry;
|
|
|
+
|
|
|
+ rcu_read_lock();
|
|
|
+ list_for_each_entry_rcu(dentry, dispose, d_lru) {
|
|
|
+ spin_lock(&dentry->d_lock);
|
|
|
+ dentry->d_flags |= DCACHE_SHRINK_LIST;
|
|
|
+ spin_unlock(&dentry->d_lock);
|
|
|
+ }
|
|
|
+ rcu_read_unlock();
|
|
|
+ shrink_dentry_list(dispose);
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* shrink_dcache_sb - shrink dcache for a superblock
|
|
|
* @sb: superblock
|
|
@@ -939,9 +988,17 @@ void shrink_dcache_sb(struct super_block *sb)
|
|
|
|
|
|
spin_lock(&sb->s_dentry_lru_lock);
|
|
|
while (!list_empty(&sb->s_dentry_lru)) {
|
|
|
+ /*
|
|
|
+ * account for removal here so we don't need to handle it later
|
|
|
+ * even though the dentry is no longer on the lru list.
|
|
|
+ */
|
|
|
list_splice_init(&sb->s_dentry_lru, &tmp);
|
|
|
+ this_cpu_sub(nr_dentry_unused, sb->s_nr_dentry_unused);
|
|
|
+ sb->s_nr_dentry_unused = 0;
|
|
|
spin_unlock(&sb->s_dentry_lru_lock);
|
|
|
- shrink_dentry_list(&tmp);
|
|
|
+
|
|
|
+ shrink_dcache_list(&tmp);
|
|
|
+
|
|
|
spin_lock(&sb->s_dentry_lru_lock);
|
|
|
}
|
|
|
spin_unlock(&sb->s_dentry_lru_lock);
|