lock.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include "lock_dlm.h"

static char junk_lvb[GDLM_LVB_SIZE];

/* convert dlm lock-mode to gfs lock-state */

static s16 gdlm_make_lmstate(s16 dlmmode)
{
	switch (dlmmode) {
	case DLM_LOCK_IV:
	case DLM_LOCK_NL:
		return LM_ST_UNLOCKED;
	case DLM_LOCK_EX:
		return LM_ST_EXCLUSIVE;
	case DLM_LOCK_CW:
		return LM_ST_DEFERRED;
	case DLM_LOCK_PR:
		return LM_ST_SHARED;
	}
	gdlm_assert(0, "unknown DLM mode %d", dlmmode);
	return -1;
}

/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
   thread gets to it. */

static void queue_submit(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->delay_list, &ls->submit);
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}
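
/* Clear LFL_AST_WAIT and wake any thread sleeping in wait_on_bit() on that
   flag (see hold_null_lock() and the LFL_SYNC_LVB path in process_complete());
   the barrier orders the clear_bit before the waitqueue check. */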
static void wake_up_ast(struct gdlm_lock *lp)
{
	clear_bit(LFL_AST_WAIT, &lp->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&lp->flags, LFL_AST_WAIT);
}
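
/* Remove a lock from the delayed and all_locks lists and free it.  Takes
   async_lock itself, so callers must not hold it. */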
static void gdlm_delete_lp(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->delay_list))
		list_del_init(&lp->delay_list);
	gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
		    (unsigned long long)lp->lockname.ln_number);
	list_del_init(&lp->all_list);
	ls->all_locks_count--;
	spin_unlock(&ls->async_lock);

	kfree(lp);
}

static void gdlm_queue_delayed(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->delay_list, &ls->delayed);
	spin_unlock(&ls->async_lock);
}
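
/* Called from gdlm_ast() when dlm reports a completed request.  Sorts out
   cancels, unlocks, errors, and recovery-time demotions before handing the
   final state back to GFS via the fscb callback. */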
static void process_complete(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	struct lm_async_cb acb;
	s16 prev_mode = lp->cur;

	memset(&acb, 0, sizeof(acb));

	if (lp->lksb.sb_status == -DLM_ECANCEL) {
		log_info("complete dlm cancel %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);

		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		if (lp->cur == DLM_LOCK_IV)
			lp->lksb.sb_lkid = 0;
		goto out;
	}

	if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		if (lp->lksb.sb_status != -DLM_EUNLOCK) {
			log_info("unlock sb_status %d %x,%llx flags %lx",
				 lp->lksb.sb_status, lp->lockname.ln_type,
				 (unsigned long long)lp->lockname.ln_number,
				 lp->flags);
			return;
		}

		lp->cur = DLM_LOCK_IV;
		lp->req = DLM_LOCK_IV;
		lp->lksb.sb_lkid = 0;

		if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
			gdlm_delete_lp(lp);
			return;
		}
		goto out;
	}

	if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
		memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (lp->req == DLM_LOCK_PR)
			lp->req = DLM_LOCK_CW;
		else if (lp->req == DLM_LOCK_CW)
			lp->req = DLM_LOCK_PR;
	}

	/*
	 * A canceled lock request.  The lock was just taken off the delayed
	 * list and was never even submitted to dlm.
	 */

	if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
		log_info("complete internal cancel %x,%llx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number);

		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		goto out;
	}

	/*
	 * An error occurred.
	 */

	if (lp->lksb.sb_status) {
		/* a "normal" error */
		if ((lp->lksb.sb_status == -EAGAIN) &&
		    (lp->lkf & DLM_LKF_NOQUEUE)) {
			lp->req = lp->cur;
			if (lp->cur == DLM_LOCK_IV)
				lp->lksb.sb_lkid = 0;
			goto out;
		}

		/* this could only happen with cancels I think */
		log_info("ast sb_status %d %x,%llx flags %lx",
			 lp->lksb.sb_status, lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);

		if (lp->lksb.sb_status == -EDEADLOCK &&
		    lp->ls->fsflags & LM_MFLAG_CONV_NODROP) {
			lp->req = lp->cur;
			acb.lc_ret |= LM_OUT_CONV_DEADLK;
			if (lp->cur == DLM_LOCK_IV)
				lp->lksb.sb_lkid = 0;
			goto out;
		} else
			return;
	}

	/*
	 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
	 */

	if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
		wake_up_ast(lp);
		return;
	}

	/*
	 * A lock has been demoted to NL because it initially completed during
	 * BLOCK_LOCKS.  Now it must be requested in the originally requested
	 * mode.
	 */

	if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
		gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);
		gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);

		lp->cur = DLM_LOCK_NL;
		lp->req = lp->prev_req;
		lp->prev_req = DLM_LOCK_IV;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		set_bit(LFL_NOCACHE, &lp->flags);

		if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
		    !test_bit(LFL_NOBLOCK, &lp->flags))
			gdlm_queue_delayed(lp);
		else
			queue_submit(lp);
		return;
	}

	/*
	 * A request is granted during dlm recovery.  It may be granted
	 * because the locks of a failed node were cleared.  In that case,
	 * there may be inconsistent data beneath this lock and we must wait
	 * for recovery to complete to use it.  When gfs recovery is done this
	 * granted lock will be converted to NL and then reacquired in this
	 * granted state.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) &&
	    lp->req != DLM_LOCK_NL) {
		lp->cur = lp->req;
		lp->prev_req = lp->req;
		lp->req = DLM_LOCK_NL;
		lp->lkf |= DLM_LKF_CONVERT;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		log_debug("rereq %x,%llx id %x %d,%d",
			  lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number,
			  lp->lksb.sb_lkid, lp->cur, lp->req);

		set_bit(LFL_REREQUEST, &lp->flags);
		queue_submit(lp);
		return;
	}

	/*
	 * DLM demoted the lock to NL before it was granted so GFS must be
	 * told it cannot cache data for this lock.
	 */

	if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
		set_bit(LFL_NOCACHE, &lp->flags);

out:
	/*
	 * This is an internal lock_dlm lock.
	 */

	if (test_bit(LFL_INLOCK, &lp->flags)) {
		clear_bit(LFL_NOBLOCK, &lp->flags);
		lp->cur = lp->req;
		wake_up_ast(lp);
		return;
	}

	/*
	 * Normal completion of a lock request.  Tell GFS it now has the lock.
	 */

	clear_bit(LFL_NOBLOCK, &lp->flags);
	lp->cur = lp->req;
	acb.lc_name = lp->lockname;
	acb.lc_ret |= gdlm_make_lmstate(lp->cur);

	if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
	    (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
		acb.lc_ret |= LM_OUT_CACHEABLE;

	ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
}
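
/* Completion callback passed to dlm_lock(); every completed request
   funnels through process_complete(). */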
static void gdlm_ast(void *astarg)
{
	struct gdlm_lock *lp = astarg;

	clear_bit(LFL_ACTIVE, &lp->flags);
	process_complete(lp);
}
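
/* Blocking callback (bast): translate the blocking dlm mode into the
   matching LM_CB_NEED_* callback so GFS can demote the conflicting lock. */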
static void process_blocking(struct gdlm_lock *lp, int bast_mode)
{
	struct gdlm_ls *ls = lp->ls;
	unsigned int cb = 0;

	switch (gdlm_make_lmstate(bast_mode)) {
	case LM_ST_EXCLUSIVE:
		cb = LM_CB_NEED_E;
		break;
	case LM_ST_DEFERRED:
		cb = LM_CB_NEED_D;
		break;
	case LM_ST_SHARED:
		cb = LM_CB_NEED_S;
		break;
	default:
		gdlm_assert(0, "unknown bast mode %u", bast_mode);
	}

	ls->fscb(ls->sdp, cb, &lp->lockname);
}

static void gdlm_bast(void *astarg, int mode)
{
	struct gdlm_lock *lp = astarg;

	if (!mode) {
		printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
		       lp->lockname.ln_type,
		       (unsigned long long)lp->lockname.ln_number);
		return;
	}

	process_blocking(lp, mode);
}

/* convert gfs lock-state to dlm lock-mode */

static s16 make_mode(s16 lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	gdlm_assert(0, "unknown LM state %d", lmstate);
	return -1;
}

/* verify agreement with GFS on the current lock state; NB: DLM_LOCK_NL and
   DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */

static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
{
	s16 cur = make_mode(cur_state);

	if (lp->cur != DLM_LOCK_IV)
		gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
}
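
/* Map GFS request flags (LM_FLAG_*) onto dlm request flags (DLM_LKF_*),
   and decide whether dlm's conversion-deadlock avoidance can be used for
   this request. */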
static inline unsigned int make_flags(struct gdlm_lock *lp,
				      unsigned int gfs_flags,
				      s16 cur, s16 req)
{
	unsigned int lkf = 0;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
	}

	if (lp->lksb.sb_lkid != 0) {
		lkf |= DLM_LKF_CONVERT;

		/* Conversion deadlock avoidance by DLM */
		if (!(lp->ls->fsflags & LM_MFLAG_CONV_NODROP) &&
		    !test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
		    !(lkf & DLM_LKF_NOQUEUE) &&
		    cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
			lkf |= DLM_LKF_CONVDEADLK;
	}

	if (lp->lvb)
		lkf |= DLM_LKF_VALBLK;

	return lkf;
}

/* make_strname - convert GFS lock numbers to a string */

static inline void make_strname(const struct lm_lockname *lockname,
				struct gdlm_strname *str)
{
	sprintf(str->name, "%8x%16llx", lockname->ln_type,
		(unsigned long long)lockname->ln_number);
	str->namelen = GDLM_STRNAME_BYTES;
}

static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
			  struct gdlm_lock **lpp)
{
	struct gdlm_lock *lp;

	lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
	if (!lp)
		return -ENOMEM;

	lp->lockname = *name;
	make_strname(name, &lp->strname);
	lp->ls = ls;
	lp->cur = DLM_LOCK_IV;
	INIT_LIST_HEAD(&lp->delay_list);

	spin_lock(&ls->async_lock);
	list_add(&lp->all_list, &ls->all_locks);
	ls->all_locks_count++;
	spin_unlock(&ls->async_lock);

	*lpp = lp;
	return 0;
}

int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
		  void **lockp)
{
	struct gdlm_lock *lp;
	int error;

	error = gdlm_create_lp(lockspace, name, &lp);

	*lockp = lp;
	return error;
}

void gdlm_put_lock(void *lock)
{
	gdlm_delete_lp(lock);
}

unsigned int gdlm_do_lock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	int error, bast = 1;

	/*
	 * When recovery is in progress, delay lock requests for submission
	 * once recovery is done.  Requests for recovery (NOEXP) and unlocks
	 * can pass.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
		gdlm_queue_delayed(lp);
		return LM_OUT_ASYNC;
	}

	/*
	 * Submit the actual lock request.
	 */

	if (test_bit(LFL_NOBAST, &lp->flags))
		bast = 0;

	set_bit(LFL_ACTIVE, &lp->flags);

	log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
		  lp->cur, lp->req, lp->lkf);

	error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
			 lp->strname.name, lp->strname.namelen, 0, gdlm_ast,
			 lp, bast ? gdlm_bast : NULL);

	if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
		lp->lksb.sb_status = -EAGAIN;
		gdlm_ast(lp);
		error = 0;
	}

	if (error) {
		log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}

static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	unsigned int lkf = 0;
	int error;

	set_bit(LFL_DLM_UNLOCK, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	if (lp->lvb)
		lkf = DLM_LKF_VALBLK;

	log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number,
		  lp->lksb.sb_lkid, lp->cur, lkf);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);

	if (error) {
		log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}
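
/* Entry point for lock and conversion requests from GFS.  A request for
   LM_ST_UNLOCKED is handed off to gdlm_unlock(). */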
unsigned int gdlm_lock(void *lock, unsigned int cur_state,
		       unsigned int req_state, unsigned int flags)
{
	struct gdlm_lock *lp = lock;

	if (req_state == LM_ST_UNLOCKED)
		return gdlm_unlock(lock, cur_state);

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	if (flags & LM_FLAG_NOEXP)
		set_bit(LFL_NOBLOCK, &lp->flags);

	check_cur_state(lp, cur_state);
	lp->req = make_mode(req_state);
	lp->lkf = make_flags(lp, flags, lp->cur, lp->req);

	return gdlm_do_lock(lp);
}

unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
{
	struct gdlm_lock *lp = lock;

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	if (lp->cur == DLM_LOCK_IV)
		return 0;
	return gdlm_do_unlock(lp);
}
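
/* Cancel a pending request.  A lock still sitting on the delayed list has
   never reached dlm and is completed internally; one already active in dlm
   is canceled with a DLM_LKF_CANCEL unlock. */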
void gdlm_cancel(void *lock)
{
	struct gdlm_lock *lp = lock;
	struct gdlm_ls *ls = lp->ls;
	int error, delay_list = 0;

	if (test_bit(LFL_DLM_CANCEL, &lp->flags))
		return;

	log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->delay_list)) {
		list_del_init(&lp->delay_list);
		delay_list = 1;
	}
	spin_unlock(&ls->async_lock);

	if (delay_list) {
		set_bit(LFL_CANCEL, &lp->flags);
		set_bit(LFL_ACTIVE, &lp->flags);
		gdlm_ast(lp);
		return;
	}

	if (!test_bit(LFL_ACTIVE, &lp->flags) ||
	    test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		log_info("gdlm_cancel skip %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number, lp->flags);
		return;
	}

	/* the lock is blocked in the dlm */

	set_bit(LFL_DLM_CANCEL, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
			   NULL, lp);

	log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
		 lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	if (error == -EBUSY)
		clear_bit(LFL_DLM_CANCEL, &lp->flags);
}

static int gdlm_add_lvb(struct gdlm_lock *lp)
{
	char *lvb;

	lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
	if (!lvb)
		return -ENOMEM;

	lp->lksb.sb_lvbptr = lvb;
	lp->lvb = lvb;
	return 0;
}

static void gdlm_del_lvb(struct gdlm_lock *lp)
{
	kfree(lp->lvb);
	lp->lvb = NULL;
	lp->lksb.sb_lvbptr = NULL;
}

static int gdlm_ast_wait(void *word)
{
	schedule();
	return 0;
}

/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs won't call hold_lvb() during a callback (from
   the context of a lock_dlm thread). */

static int hold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = NULL;
	int error;

	if (lp->hold_null) {
		printk(KERN_INFO "lock_dlm: lvb already held\n");
		return 0;
	}

	error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
	if (error)
		goto out;

	lpn->lksb.sb_lvbptr = junk_lvb;
	lpn->lvb = junk_lvb;

	lpn->req = DLM_LOCK_NL;
	lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
	set_bit(LFL_NOBAST, &lpn->flags);
	set_bit(LFL_INLOCK, &lpn->flags);
	set_bit(LFL_AST_WAIT, &lpn->flags);

	gdlm_do_lock(lpn);
	wait_on_bit(&lpn->flags, LFL_AST_WAIT, gdlm_ast_wait,
		    TASK_UNINTERRUPTIBLE);
	error = lpn->lksb.sb_status;
	if (error) {
		printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
		       error);
		gdlm_delete_lp(lpn);
		lpn = NULL;
	}
out:
	lp->hold_null = lpn;
	return error;
}

/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to
   get the completion) because gfs may call unhold_lvb() during a callback
   (from the context of a lock_dlm thread) which could cause a deadlock since
   the other lock_dlm thread could be engaged in recovery. */

static void unhold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = lp->hold_null;

	gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
		    (unsigned long long)lp->lockname.ln_number);
	lpn->lksb.sb_lvbptr = NULL;
	lpn->lvb = NULL;
	set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
	gdlm_do_unlock(lpn);
	lp->hold_null = NULL;
}

/* Acquire a NL lock because gfs requires the value block to remain intact on
   the resource while the lvb is "held" even if it's holding no locks on the
   resource. */

int gdlm_hold_lvb(void *lock, char **lvbp)
{
	struct gdlm_lock *lp = lock;
	int error;

	error = gdlm_add_lvb(lp);
	if (error)
		return error;

	*lvbp = lp->lvb;

	error = hold_null_lock(lp);
	if (error)
		gdlm_del_lvb(lp);

	return error;
}

void gdlm_unhold_lvb(void *lock, char *lvb)
{
	struct gdlm_lock *lp = lock;

	unhold_null_lock(lp);
	gdlm_del_lvb(lp);
}
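
/* Move every lock parked on the delayed list to the submit list and wake
   the lock_dlm thread so the requests are resubmitted. */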
void gdlm_submit_delayed(struct gdlm_ls *ls)
{
	struct gdlm_lock *lp, *safe;

	spin_lock(&ls->async_lock);
	list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
		list_del_init(&lp->delay_list);
		list_add_tail(&lp->delay_list, &ls->submit);
	}
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}
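
/* Free every lock still on the lockspace's all_locks list and return how
   many there were. */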
int gdlm_release_all_locks(struct gdlm_ls *ls)
{
	struct gdlm_lock *lp, *safe;
	int count = 0;

	spin_lock(&ls->async_lock);
	list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
		list_del_init(&lp->all_list);
		if (lp->lvb && lp->lvb != junk_lvb)
			kfree(lp->lvb);
		kfree(lp);
		count++;
	}
	spin_unlock(&ls->async_lock);

	return count;
}