@@ -541,21 +541,6 @@ out_locked:
 	spin_unlock(&gl->gl_spin);
 }
 
-static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
-				 unsigned int req_state,
-				 unsigned int flags)
-{
-	int ret = LM_OUT_ERROR;
-
-	if (!sdp->sd_lockstruct.ls_ops->lm_lock)
-		return req_state == LM_ST_UNLOCKED ? 0 : req_state;
-
-	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock,
-							 req_state, flags);
-	return ret;
-}
-
 /**
  * do_xmote - Calls the DLM to change the state of a lock
  * @gl: The lock state
@@ -575,13 +560,14 @@ __acquires(&gl->gl_spin)
 
 	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
 		      LM_FLAG_PRIORITY);
-	BUG_ON(gl->gl_state == target);
-	BUG_ON(gl->gl_state == gl->gl_target);
+	GLOCK_BUG_ON(gl, gl->gl_state == target);
+	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
 	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
 	    glops->go_inval) {
 		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 		do_error(gl, 0); /* Fail queued try locks */
 	}
+	gl->gl_req = target;
 	spin_unlock(&gl->gl_spin);
 	if (glops->go_xmote_th)
 		glops->go_xmote_th(gl);
@@ -594,15 +580,17 @@ __acquires(&gl->gl_spin)
 	    gl->gl_state == LM_ST_DEFERRED) &&
 	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
 		lck_flags |= LM_FLAG_TRY_1CB;
-	ret = gfs2_lm_lock(sdp, gl, target, lck_flags);
 
-	if (!(ret & LM_OUT_ASYNC)) {
-		finish_xmote(gl, ret);
+	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
+		/* lock_dlm */
+		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
+		GLOCK_BUG_ON(gl, ret);
+	} else { /* lock_nolock */
+		finish_xmote(gl, target);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gfs2_glock_put(gl);
-	} else {
-		GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
 	}
+
 	spin_lock(&gl->gl_spin);
 }
 
@@ -951,17 +939,22 @@ int gfs2_glock_wait(struct gfs2_holder *gh)
 
 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
+
 	if (seq) {
 		struct gfs2_glock_iter *gi = seq->private;
 		vsprintf(gi->string, fmt, args);
 		seq_printf(seq, gi->string);
 	} else {
-		printk(KERN_ERR " ");
-		vprintk(fmt, args);
+		vaf.fmt = fmt;
+		vaf.va = &args;
+
+		printk(KERN_ERR " %pV", &vaf);
 	}
+
 	va_end(args);
 }
 
@@ -1361,24 +1354,28 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
  * @gl: Pointer to the glock
  * @ret: The return value from the dlm
  *
+ * The gl_reply field is under the gl_spin lock so that it is ok
+ * to use a bitfield shared with other glock state fields.
  */
 
 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
 	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
 
+	spin_lock(&gl->gl_spin);
 	gl->gl_reply = ret;
 
 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
-		spin_lock(&gl->gl_spin);
 		if (gfs2_should_freeze(gl)) {
 			set_bit(GLF_FROZEN, &gl->gl_flags);
 			spin_unlock(&gl->gl_spin);
 			return;
 		}
-		spin_unlock(&gl->gl_spin);
 	}
+
+	spin_unlock(&gl->gl_spin);
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+	smp_wmb();
 	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put(gl);
@@ -1626,18 +1623,17 @@ static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
 static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
 {
 	struct task_struct *gh_owner = NULL;
-	char buffer[KSYM_SYMBOL_LEN];
 	char flags_buf[32];
 
-	sprint_symbol(buffer, gh->gh_ip);
 	if (gh->gh_owner_pid)
 		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
-	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
-		       state2str(gh->gh_state),
-		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
-		       gh->gh_error,
-		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
-		       gh_owner ? gh_owner->comm : "(ended)", buffer);
+	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
+		       state2str(gh->gh_state),
+		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
+		       gh->gh_error,
+		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
+		       gh_owner ? gh_owner->comm : "(ended)",
+		       (void *)gh->gh_ip);
 	return 0;
 }
 
@@ -1782,12 +1778,13 @@ int __init gfs2_glock_init(void)
 	}
 #endif
 
-	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_RESCUER |
+	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
 					  WQ_HIGHPRI | WQ_FREEZEABLE, 0);
 	if (IS_ERR(glock_workqueue))
 		return PTR_ERR(glock_workqueue);
-	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", WQ_RESCUER |
-						WQ_FREEZEABLE, 0);
+	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
+						WQ_MEM_RECLAIM | WQ_FREEZEABLE,
+						0);
 	if (IS_ERR(gfs2_delete_workqueue)) {
 		destroy_workqueue(glock_workqueue);
 		return PTR_ERR(gfs2_delete_workqueue);