@@ -567,6 +567,7 @@ __acquires(&gl->gl_spin)
 		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 		do_error(gl, 0); /* Fail queued try locks */
 	}
+	gl->gl_req = target;
 	spin_unlock(&gl->gl_spin);
 	if (glops->go_xmote_th)
 		glops->go_xmote_th(gl);
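
The hunk above records the requested state in gl->gl_req while gl_spin is still held,
before the request is handed down to the lock module. The sketch below is not part of
the patch; it is a minimal illustration, using hypothetical names (lock_obj,
request_state, complete_state), of the record-under-lock pattern this enables: the
completion path can later compare the asynchronous reply against the state that was
originally asked for.

/*
 * Illustrative sketch only, not GFS2 source: request_state() and
 * complete_state() are hypothetical helpers showing how recording the
 * requested state under the lock lets the asynchronous completion
 * compare the reply against it.
 */
#include <linux/spinlock.h>

struct lock_obj {
	spinlock_t lock;	/* plays the role of gl_spin */
	unsigned int req;	/* last requested state (cf. gl_req) */
	unsigned int reply;	/* last reply from the lock manager (cf. gl_reply) */
};

static void request_state(struct lock_obj *lo, unsigned int target)
{
	spin_lock(&lo->lock);
	lo->req = target;		/* remember what we asked for */
	spin_unlock(&lo->lock);
	/* ... hand the asynchronous request to the lock module here ... */
}

static void complete_state(struct lock_obj *lo, unsigned int granted)
{
	spin_lock(&lo->lock);
	lo->reply = granted;
	if (lo->reply != lo->req) {
		/* not granted in the requested mode; handle it here */
	}
	spin_unlock(&lo->lock);
}
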
@@ -1353,24 +1354,28 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
  * @gl: Pointer to the glock
  * @ret: The return value from the dlm
  *
+ * The gl_reply field is under the gl_spin lock so that it is ok
+ * to use a bitfield shared with other glock state fields.
  */

 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
 	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

+	spin_lock(&gl->gl_spin);
 	gl->gl_reply = ret;

 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
-		spin_lock(&gl->gl_spin);
 		if (gfs2_should_freeze(gl)) {
 			set_bit(GLF_FROZEN, &gl->gl_flags);
 			spin_unlock(&gl->gl_spin);
 			return;
 		}
-		spin_unlock(&gl->gl_spin);
 	}
+
+	spin_unlock(&gl->gl_spin);
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+	smp_wmb();
 	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put(gl);
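
The new comment explains why taking gl_spin around the gl_reply update is needed:
gl_reply is declared as a bitfield, and a store to one bitfield member may be
implemented as a read-modify-write of the storage unit it shares with the other
glock state fields, so every writer of that unit must hold the same lock. A minimal
sketch of that hazard follows; the field layout is illustrative and not the actual
struct gfs2_glock definition.

/*
 * Minimal sketch, not the real struct gfs2_glock: adjacent bitfields share
 * one storage unit, so writing any one of them may rewrite the whole unit
 * and must be serialised with the writers of the other fields.
 */
#include <linux/spinlock.h>

struct state_word {
	spinlock_t lock;		/* plays the role of gl_spin */
	unsigned int state:2,		/* cf. gl_state */
		     target:2,		/* cf. gl_target */
		     reply:8;		/* cf. gl_reply */
};

static void record_reply(struct state_word *s, int ret)
{
	spin_lock(&s->lock);		/* serialise the read-modify-write */
	s->reply = ret;			/* may rewrite the unit holding state/target too */
	spin_unlock(&s->lock);
}

The smp_wmb() added after set_bit(GLF_REPLY_PENDING, ...) is presumably there because
set_bit() does not imply a memory barrier, so the explicit write barrier makes the
pending-reply flag and the stored gl_reply value visible to other CPUs before the
queued glock work can run and observe them.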