@@ -145,14 +145,9 @@ static int demote_ok(const struct gfs2_glock *gl)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 
-	/* assert_spin_locked(&gl->gl_spin); */
-
 	if (gl->gl_state == LM_ST_UNLOCKED)
 		return 0;
-	if (test_bit(GLF_LFLUSH, &gl->gl_flags))
-		return 0;
-	if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
-	    !list_empty(&gl->gl_holders))
+	if (!list_empty(&gl->gl_holders))
 		return 0;
 	if (glops->go_demote_ok)
 		return glops->go_demote_ok(gl);
@@ -174,6 +169,17 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 	spin_unlock(&lru_lock);
 }
 
+static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+{
+	spin_lock(&lru_lock);
+	if (!list_empty(&gl->gl_lru)) {
+		list_del_init(&gl->gl_lru);
+		atomic_dec(&lru_count);
+		clear_bit(GLF_LRU, &gl->gl_flags);
+	}
+	spin_unlock(&lru_lock);
+}
+
 /**
  * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
  * @gl: the glock
@@ -217,12 +223,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 		spin_lock_bucket(gl->gl_hash);
 		hlist_bl_del_rcu(&gl->gl_list);
 		spin_unlock_bucket(gl->gl_hash);
-		spin_lock(&lru_lock);
-		if (!list_empty(&gl->gl_lru)) {
-			list_del_init(&gl->gl_lru);
-			atomic_dec(&lru_count);
-		}
-		spin_unlock(&lru_lock);
+		gfs2_glock_remove_from_lru(gl);
 		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
 		trace_gfs2_glock_put(gl);
@@ -1025,6 +1026,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		return -EIO;
 
+	if (test_bit(GLF_LRU, &gl->gl_flags))
+		gfs2_glock_remove_from_lru(gl);
+
 	spin_lock(&gl->gl_spin);
 	add_to_queue(gh);
 	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
@@ -1082,7 +1086,8 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
 			fast_path = 1;
 	}
-	__gfs2_glock_schedule_for_reclaim(gl);
+	if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
+		__gfs2_glock_schedule_for_reclaim(gl);
 	trace_gfs2_glock_queue(gh, 0);
 	spin_unlock(&gl->gl_spin);
 	if (likely(fast_path))
@@ -1461,12 +1466,7 @@ static void thaw_glock(struct gfs2_glock *gl)
 
 static void clear_glock(struct gfs2_glock *gl)
 {
-	spin_lock(&lru_lock);
-	if (!list_empty(&gl->gl_lru)) {
-		list_del_init(&gl->gl_lru);
-		atomic_dec(&lru_count);
-	}
-	spin_unlock(&lru_lock);
+	gfs2_glock_remove_from_lru(gl);
 
 	spin_lock(&gl->gl_spin);
 	if (gl->gl_state != LM_ST_UNLOCKED)
@@ -1666,7 +1666,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
 	dtime *= 1000000/HZ; /* demote time in uSec */
 	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
 		dtime = 0;
-	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
+	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
 		  state2str(gl->gl_state),
 		  gl->gl_name.ln_type,
 		  (unsigned long long)gl->gl_name.ln_number,
@@ -1674,6 +1674,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
 		  state2str(gl->gl_target),
 		  state2str(gl->gl_demote_state), dtime,
 		  atomic_read(&gl->gl_ail_count),
+		  atomic_read(&gl->gl_revokes),
 		  atomic_read(&gl->gl_ref));
 
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
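
The new gfs2_glock_remove_from_lru() helper consolidates three open-coded LRU removals, and the new GLF_LRU bit lets gfs2_glock_nq() avoid taking lru_lock entirely when the glock is not on the LRU; list membership is still re-checked under the lock, since another CPU may race the removal. Below is a minimal userspace sketch of that test-outside/re-check-inside idiom, assuming C11 atomics and pthreads; all names here (entry, lru_add, lru_remove, acquire) are illustrative, not kernel API.

/*
 * Userspace sketch of the idiom above (NOT kernel code): an atomic
 * "on the LRU" flag is tested locklessly on the hot path, and list
 * membership is re-checked under the lock before unlinking, because
 * another thread may have removed the entry in the meantime.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	struct entry *prev, *next;	/* list_head-style circular links */
	atomic_bool on_lru;		/* analogue of the GLF_LRU bit */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry lru = { &lru, &lru };	/* empty list head */
static int lru_count;

static void lru_add(struct entry *e)
{
	pthread_mutex_lock(&lru_lock);
	if (e->next == e) {			/* self-linked == not queued */
		e->next = lru.next;
		e->prev = &lru;
		lru.next->prev = e;
		lru.next = e;
		lru_count++;
		atomic_store(&e->on_lru, true);
	}
	pthread_mutex_unlock(&lru_lock);
}

/* Like gfs2_glock_remove_from_lru(): re-check membership under the lock. */
static void lru_remove(struct entry *e)
{
	pthread_mutex_lock(&lru_lock);
	if (e->next != e) {			/* still on the list? */
		e->prev->next = e->next;
		e->next->prev = e->prev;
		e->next = e->prev = e;		/* list_del_init() analogue */
		lru_count--;
		atomic_store(&e->on_lru, false);
	}
	pthread_mutex_unlock(&lru_lock);
}

/* Hot path, like gfs2_glock_nq(): cheap lockless test before locking. */
static void acquire(struct entry *e)
{
	if (atomic_load(&e->on_lru))
		lru_remove(e);
	/* ... queue the holder, etc. ... */
}

int main(void)
{
	struct entry e = { &e, &e, false };
	lru_add(&e);
	acquire(&e);
	printf("lru_count = %d\n", lru_count);	/* prints 0 */
	return 0;
}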