@@ -69,7 +69,8 @@ static int dlm_do_assert_master(struct dlm_ctxt *dlm,
 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
 
 static inline void __dlm_mle_name(struct dlm_master_list_entry *mle,
-                                  unsigned char **name, unsigned int *namelen)
+                                  unsigned char **name, unsigned int *namelen,
+                                  unsigned int *namehash)
 {
         BUG_ON(mle->type != DLM_MLE_BLOCK &&
                mle->type != DLM_MLE_MASTER &&
@@ -78,9 +79,13 @@ static inline void __dlm_mle_name(struct dlm_master_list_entry *mle,
         if (mle->type != DLM_MLE_MASTER) {
                 *name = mle->u.mlename.name;
                 *namelen = mle->u.mlename.len;
+                if (namehash)
+                        *namehash = mle->u.mlename.hash;
         } else {
                 *name = (unsigned char *)mle->u.mleres->lockname.name;
                 *namelen = mle->u.mleres->lockname.len;
+                if (namehash)
+                        *namehash = mle->u.mleres->lockname.hash;
         }
 }
 
@@ -95,7 +100,7 @@ static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
         if (dlm != mle->dlm)
                 return 0;
 
-        __dlm_mle_name(mle, &mlename, &mlelen);
+        __dlm_mle_name(mle, &mlename, &mlelen, NULL);
 
         if (namelen != mlelen || memcmp(name, mlename, namelen) != 0)
                 return 0;
@@ -294,7 +299,7 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle,
 
         mle->dlm = dlm;
         mle->type = type;
-        INIT_LIST_HEAD(&mle->list);
+        INIT_HLIST_NODE(&mle->master_hash_node);
         INIT_LIST_HEAD(&mle->hb_events);
         memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
         spin_lock_init(&mle->spinlock);
@@ -317,6 +322,7 @@ static void dlm_init_mle(struct dlm_master_list_entry *mle,
                 BUG_ON(!name);
                 memcpy(mle->u.mlename.name, name, namelen);
                 mle->u.mlename.len = namelen;
+                mle->u.mlename.hash = dlm_lockid_hash(name, namelen);
         }
 
         /* copy off the node_map and register hb callbacks on our copy */
@@ -334,15 +340,21 @@ void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
         assert_spin_locked(&dlm->spinlock);
         assert_spin_locked(&dlm->master_lock);
 
-        if (!list_empty(&mle->list))
-                list_del_init(&mle->list);
+        if (!hlist_unhashed(&mle->master_hash_node))
+                hlist_del_init(&mle->master_hash_node);
 }
 
 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
 {
+        struct hlist_head *bucket;
+        unsigned char *mname;
+        unsigned int mlen, hash;
+
         assert_spin_locked(&dlm->master_lock);
 
-        list_add(&mle->list, &dlm->master_list);
+        __dlm_mle_name(mle, &mname, &mlen, &hash);
+        bucket = dlm_master_hash(dlm, hash);
+        hlist_add_head(&mle->master_hash_node, bucket);
 }
 
 /* returns 1 if found, 0 if not */
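
The bucket selection in __dlm_insert_mle() leans on two helpers that live outside
this patch: dlm_lockid_hash(), which hashes the lock name, and dlm_master_hash(),
which maps that hash to one of the DLM_HASH_BUCKETS hlist heads hanging off the
dlm_ctxt. Below is a minimal sketch of the mapping the calls above rely on; the
flat bucket array and the example_* names are assumptions for illustration only
(the real dlm_master_hash() in dlmcommon.h may lay its buckets out differently).

/*
 * Sketch only: hash-modulo-buckets lookup as assumed by __dlm_insert_mle()
 * and dlm_find_mle(). Not the actual dlmcommon.h implementation.
 */
struct example_master_table {
        struct hlist_head buckets[DLM_HASH_BUCKETS];
};

static inline struct hlist_head *example_master_hash(struct example_master_table *t,
                                                     unsigned int hash)
{
        /* fold the full lockid hash down to a fixed number of buckets */
        return &t->buckets[hash % DLM_HASH_BUCKETS];
}
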
@@ -351,10 +363,17 @@ static int dlm_find_mle(struct dlm_ctxt *dlm,
                         char *name, unsigned int namelen)
 {
         struct dlm_master_list_entry *tmpmle;
+        struct hlist_head *bucket;
+        struct hlist_node *list;
+        unsigned int hash;
 
         assert_spin_locked(&dlm->master_lock);
 
-        list_for_each_entry(tmpmle, &dlm->master_list, list) {
+        hash = dlm_lockid_hash(name, namelen);
+        bucket = dlm_master_hash(dlm, hash);
+        hlist_for_each(list, bucket) {
+                tmpmle = hlist_entry(list, struct dlm_master_list_entry,
+                                     master_hash_node);
                 if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
                         continue;
                 dlm_get_mle(tmpmle);
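
In dlm_find_mle() the search hash is recomputed from the caller's name with
dlm_lockid_hash(), so only one bucket is walked instead of the whole master list.
The open-coded hlist_entry() above is simply container_of() for hlist nodes:
hlist_for_each() hands back raw struct hlist_node pointers, so the enclosing mle
has to be recovered explicitly. Purely as a clarification, here is the same loop
sketched with the hlist_for_each_entry() helper from <linux/list.h> (2.6-era
kernels take the extra hlist_node cursor shown here); example_find() itself is
not part of the patch.

static struct dlm_master_list_entry *example_find(struct dlm_ctxt *dlm,
                                                  struct hlist_head *bucket,
                                                  char *name,
                                                  unsigned int namelen)
{
        struct dlm_master_list_entry *tmpmle;
        struct hlist_node *node;

        /* walk one bucket; each entry is recovered from its embedded node */
        hlist_for_each_entry(tmpmle, node, bucket, master_hash_node) {
                if (dlm_mle_equal(dlm, tmpmle, name, namelen))
                        return tmpmle;  /* caller still takes a ref via dlm_get_mle() */
        }
        return NULL;
}
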
@@ -428,23 +447,20 @@ static void dlm_mle_release(struct kref *kref)
 {
         struct dlm_master_list_entry *mle;
         struct dlm_ctxt *dlm;
+        unsigned char *mname;
+        unsigned int mlen;
 
         mlog_entry_void();
 
         mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
         dlm = mle->dlm;
 
-        if (mle->type != DLM_MLE_MASTER) {
-                mlog(0, "calling mle_release for %.*s, type %d\n",
-                     mle->u.mlename.len, mle->u.mlename.name, mle->type);
-        } else {
-                mlog(0, "calling mle_release for %.*s, type %d\n",
-                     mle->u.mleres->lockname.len,
-                     mle->u.mleres->lockname.name, mle->type);
-        }
         assert_spin_locked(&dlm->spinlock);
         assert_spin_locked(&dlm->master_lock);
 
+        __dlm_mle_name(mle, &mname, &mlen, NULL);
+        mlog(0, "Releasing mle for %.*s, type %d\n", mlen, mname, mle->type);
+
         /* remove from list if not already */
         __dlm_unlink_mle(dlm, mle);
 
@@ -1342,7 +1358,7 @@ static int dlm_do_master_request(struct dlm_lock_resource *res,
 
         BUG_ON(mle->type == DLM_MLE_MIGRATION);
 
-        __dlm_mle_name(mle, &mlename, &mlenamelen);
+        __dlm_mle_name(mle, &mlename, &mlenamelen, NULL);
 
         request.namelen = (u8)mlenamelen;
         memcpy(request.name, mlename, request.namelen);
@@ -3286,8 +3302,11 @@ static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
 
 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
 {
-        struct dlm_master_list_entry *mle, *next;
+        struct dlm_master_list_entry *mle;
         struct dlm_lock_resource *res;
+        struct hlist_head *bucket;
+        struct hlist_node *list;
+        unsigned int i;
 
         mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
 top:
@@ -3295,7 +3314,12 @@ top:
 
         /* clean the master list */
         spin_lock(&dlm->master_lock);
-        list_for_each_entry_safe(mle, next, &dlm->master_list, list) {
+        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+                bucket = dlm_master_hash(dlm, i);
+                hlist_for_each(list, bucket) {
+                        mle = hlist_entry(list, struct dlm_master_list_entry,
+                                          master_hash_node);
+
                 BUG_ON(mle->type != DLM_MLE_BLOCK &&
                        mle->type != DLM_MLE_MASTER &&
                        mle->type != DLM_MLE_MIGRATION);
@@ -3351,6 +3375,7 @@ top:
                 /* this may be the last reference */
                 __dlm_put_mle(mle);
         }
+        }
         spin_unlock(&dlm->master_lock);
 }
 
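
All of the above assumes the master hash itself is already in place: an array of
DLM_HASH_BUCKETS empty hlist heads reachable through dlm_master_hash(dlm, i),
set up during domain creation by an earlier patch in the series. A rough sketch
of that assumed precondition (not the actual domain-init code) is:

/*
 * Sketch of the assumed setup step, performed elsewhere before any mle is
 * inserted: every master-hash bucket starts out as an empty hlist head.
 */
static void example_init_master_hash(struct dlm_ctxt *dlm)
{
        unsigned int i;

        for (i = 0; i < DLM_HASH_BUCKETS; i++)
                INIT_HLIST_HEAD(dlm_master_hash(dlm, i));
}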