@@ -167,14 +167,19 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 	spin_unlock(&lru_lock);
 }
 
-static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 {
-	spin_lock(&lru_lock);
 	if (!list_empty(&gl->gl_lru)) {
 		list_del_init(&gl->gl_lru);
 		atomic_dec(&lru_count);
 		clear_bit(GLF_LRU, &gl->gl_flags);
 	}
+}
+
+static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+{
+	spin_lock(&lru_lock);
+	__gfs2_glock_remove_from_lru(gl);
 	spin_unlock(&lru_lock);
 }
 
@@ -217,11 +222,12 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct address_space *mapping = gfs2_glock2aspace(gl);
 
-	if (atomic_dec_and_test(&gl->gl_ref)) {
+	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
+		__gfs2_glock_remove_from_lru(gl);
+		spin_unlock(&lru_lock);
 		spin_lock_bucket(gl->gl_hash);
 		hlist_bl_del_rcu(&gl->gl_list);
 		spin_unlock_bucket(gl->gl_hash);
-		gfs2_glock_remove_from_lru(gl);
 		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
 		trace_gfs2_glock_put(gl);
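
For reference, a minimal sketch of how the reference-drop path reads once the
hunks above are applied. It is reconstructed from the diff itself; the rest of
gfs2_glock_put() and the surrounding declarations are elided:

	/* Caller must hold lru_lock; lock-free body split out of the old helper. */
	static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
	{
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
			clear_bit(GLF_LRU, &gl->gl_flags);
		}
	}

	void gfs2_glock_put(struct gfs2_glock *gl)
	{
		...
		/*
		 * atomic_dec_and_lock() acquires lru_lock only when the count
		 * reaches zero, so the glock is taken off the LRU atomically
		 * with dropping the final reference, rather than in a second
		 * lock/unlock cycle after the refcount has already hit zero.
		 */
		if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
			__gfs2_glock_remove_from_lru(gl);
			spin_unlock(&lru_lock);
			...
		}
	}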