@@ -1488,7 +1488,7 @@ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
 
 	rcu_read_lock();
 	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
-		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
+		if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
 			examiner(gl);
 	}
 	rcu_read_unlock();
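The hunk above swaps a racy atomic_read() check for atomic_inc_not_zero(), so the bucket walk takes a reference and can never hand the examiner a glock whose count has already reached zero. A minimal userspace sketch of that pattern, assuming nothing from the kernel (struct obj and obj_get_not_zero are made-up stand-ins for gfs2_glock and atomic_inc_not_zero):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int ref;		/* stands in for gl->gl_ref */
};

/* Take a reference only if the count is still nonzero; a plain read
 * followed by an increment would race with the final put. */
static bool obj_get_not_zero(struct obj *o)
{
	int r = atomic_load(&o->ref);

	while (r != 0) {
		/* on failure, r is reloaded with the current value */
		if (atomic_compare_exchange_weak(&o->ref, &r, r + 1))
			return true;
	}
	return false;	/* already zero: the object is going away */
}

int main(void)
{
	struct obj live = { .ref = 1 };
	struct obj dying = { .ref = 0 };

	printf("live:  %d\n", obj_get_not_zero(&live));		/* 1 */
	printf("dying: %d\n", obj_get_not_zero(&dying));	/* 0 */
	return 0;
}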
@@ -1508,18 +1508,17 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
  * @gl: The glock to thaw
  *
- * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
- * so this has to result in the ref count being dropped by one.
  */
 
 static void thaw_glock(struct gfs2_glock *gl)
 {
 	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
-		return;
+		goto out;
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	gfs2_glock_hold(gl);
-	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
+out:
 		gfs2_glock_put(gl);
+	}
 }
 
 /**
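This hunk drops the "N.B." comment and the extra gfs2_glock_hold(): the hash walker now owns a reference, so thaw_glock() must consume it on every path. The goto into the if-body lets both failure paths share one gfs2_glock_put(); jumping into a block like this is legal C as long as no variable-length array declarations are skipped. A toy model of that control flow (hypothetical names, not the kernel code):

#include <stdio.h>

static void put_ref(void)
{
	puts("reference dropped");
}

static void thaw(int frozen, int queued_ok)
{
	if (!frozen)
		goto out;	/* not frozen: still owe one put */
	/* set_bit(GLF_REPLY_PENDING, ...) would go here */
	if (!queued_ok) {	/* queueing failed: work already pending */
out:
		put_ref();
	}
	/* on success, the queued work now owns the reference */
}

int main(void)
{
	thaw(0, 0);	/* not frozen: drop the walker's reference */
	thaw(1, 0);	/* queueing failed: drop the walker's reference */
	thaw(1, 1);	/* queued: reference handed to the work item */
	return 0;
}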
@@ -1536,7 +1535,6 @@ static void clear_glock(struct gfs2_glock *gl)
 	if (gl->gl_state != LM_ST_UNLOCKED)
 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 	spin_unlock(&gl->gl_spin);
-	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put(gl);
 }
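Same reasoning here: clear_glock() no longer takes its own hold because examine_bucket() already did, so the walker's reference is either handed to the queued work or dropped when queue_delayed_work() returns 0 (work already pending). A compact self-contained model of that reference handoff, with invented names (queue_work_once, obj_put) standing in for the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int ref;			/* stands in for gl->gl_ref */
	atomic_bool work_pending;	/* models the delayed work state */
};

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		puts("last reference dropped; object may be freed");
}

/* Models queue_delayed_work(): false means the work was already queued. */
static bool queue_work_once(struct obj *o)
{
	bool expected = false;

	return atomic_compare_exchange_strong(&o->work_pending,
					      &expected, true);
}

/* The examiner runs with the walker's reference and must consume it:
 * hand it to the work item, or put it when queueing fails. */
static void clear_one(struct obj *o)
{
	if (!queue_work_once(o))
		obj_put(o);
}

int main(void)
{
	struct obj o = { .ref = 1, .work_pending = false };

	clear_one(&o);			/* queued: ref moves to the work */
	atomic_fetch_add(&o.ref, 1);	/* a later walk takes its own ref */
	clear_one(&o);			/* already queued: that ref is put */
	return 0;
}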