@@ -829,8 +829,8 @@ lookup:
 	 * but they might own this lockres. wait on them. */
 	bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 	if (bit < O2NM_MAX_NODES) {
-		mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
-		     "recover before lock mastery can begin\n",
+		mlog(0, "%s: res %.*s, At least one node (%d) "
+		     "to recover before lock mastery can begin\n",
 		     dlm->name, namelen, (char *)lockid, bit);
 		wait_on_recovery = 1;
 	}
@@ -864,8 +864,8 @@ redo_request:
 		 * dlm spinlock would be detectable be a change on the mle,
 		 * so we only need to clear out the recovery map once. */
 		if (dlm_is_recovery_lock(lockid, namelen)) {
-			mlog(ML_NOTICE, "%s: recovery map is not empty, but "
-			     "must master $RECOVERY lock now\n", dlm->name);
+			mlog(0, "%s: Recovery map is not empty, but must "
+			     "master $RECOVERY lock now\n", dlm->name);
 			if (!dlm_pre_master_reco_lockres(dlm, res))
 				wait_on_recovery = 0;
 			else {
@@ -883,8 +883,8 @@ redo_request:
 		spin_lock(&dlm->spinlock);
 		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
 		if (bit < O2NM_MAX_NODES) {
-			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
-			     "recover before lock mastery can begin\n",
+			mlog(0, "%s: res %.*s, At least one node (%d) "
+			     "to recover before lock mastery can begin\n",
 			     dlm->name, namelen, (char *)lockid, bit);
 			wait_on_recovery = 1;
 		} else
@@ -913,8 +913,8 @@ redo_request:
 			 * yet, keep going until it does. this is how the
 			 * master will know that asserts are needed back to
 			 * the lower nodes. */
-			mlog(0, "%s:%.*s: requests only up to %u but master "
-			     "is %u, keep going\n", dlm->name, namelen,
+			mlog(0, "%s: res %.*s, Requests only up to %u but "
+			     "master is %u, keep going\n", dlm->name, namelen,
 			     lockid, nodenum, mle->master);
 		}
 	}
@@ -924,13 +924,12 @@ wait:
 	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
 	if (ret < 0) {
 		wait_on_recovery = 1;
-		mlog(0, "%s:%.*s: node map changed, redo the "
-		     "master request now, blocked=%d\n",
-		     dlm->name, res->lockname.len,
+		mlog(0, "%s: res %.*s, Node map changed, redo the master "
+		     "request now, blocked=%d\n", dlm->name, res->lockname.len,
 		     res->lockname.name, blocked);
 		if (++tries > 20) {
-			mlog(ML_ERROR, "%s:%.*s: spinning on "
-			     "dlm_wait_for_lock_mastery, blocked=%d\n",
+			mlog(ML_ERROR, "%s: res %.*s, Spinning on "
+			     "dlm_wait_for_lock_mastery, blocked = %d\n",
 			     dlm->name, res->lockname.len,
 			     res->lockname.name, blocked);
 			dlm_print_one_lock_resource(res);
@@ -940,7 +939,8 @@ wait:
 		goto redo_request;
 	}
 
-	mlog(0, "lockres mastered by %u\n", res->owner);
+	mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
+	     res->lockname.name, res->owner);
 	/* make sure we never continue without this */
 	BUG_ON(res->owner == O2NM_MAX_NODES);
 
@@ -2187,8 +2187,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	namelen = res->lockname.len;
 	BUG_ON(namelen > O2NM_MAX_NAME_LEN);
 
-	mlog(0, "%s:%.*s: sending deref to %d\n",
-	     dlm->name, namelen, lockname, res->owner);
 	memset(&deref, 0, sizeof(deref));
 	deref.node_idx = dlm->node_num;
 	deref.namelen = namelen;
@@ -2197,14 +2195,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
 				 &deref, sizeof(deref), res->owner, &r);
 	if (ret < 0)
-		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-		     "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key,
-		     res->owner);
+		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
+		     dlm->name, namelen, lockname, ret, res->owner);
 	else if (r < 0) {
 		/* BAD. other node says I did not have a ref. */
-		mlog(ML_ERROR,"while dropping ref on %s:%.*s "
-		     "(master=%u) got %d.\n", dlm->name, namelen,
-		     lockname, res->owner, r);
+		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
+		     dlm->name, namelen, lockname, res->owner, r);
 		dlm_print_one_lock_resource(res);
 		BUG();
 	}
@@ -2916,9 +2912,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
 					 &migrate, sizeof(migrate), nodenum,
 					 &status);
 		if (ret < 0) {
-			mlog(ML_ERROR, "Error %d when sending message %u (key "
-			     "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG,
-			     dlm->key, nodenum);
+			mlog(ML_ERROR, "%s: res %.*s, Error %d send "
+			     "MIGRATE_REQUEST to node %u\n", dlm->name,
+			     migrate.namelen, migrate.name, ret, nodenum);
 			if (!dlm_is_host_down(ret)) {
 				mlog(ML_ERROR, "unhandled error=%d!\n", ret);
 				BUG();