/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include "lock_dlm.h"

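/* Shared scratch lvb given to internal NL "hold" locks; its contents are
   ignored and it is never freed (see gdlm_release_all_locks()). */
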
static char junk_lvb[GDLM_LVB_SIZE];

/* convert dlm lock-mode to gfs lock-state */

static s16 gdlm_make_lmstate(s16 dlmmode)
{
	switch (dlmmode) {
	case DLM_LOCK_IV:
	case DLM_LOCK_NL:
		return LM_ST_UNLOCKED;
	case DLM_LOCK_EX:
		return LM_ST_EXCLUSIVE;
	case DLM_LOCK_CW:
		return LM_ST_DEFERRED;
	case DLM_LOCK_PR:
		return LM_ST_SHARED;
	}
	gdlm_assert(0, "unknown DLM mode %d", dlmmode);
	return -1;
}

/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
   thread gets to it. */

static void queue_submit(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->delay_list, &ls->submit);
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}

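/* Clear LFL_AST_WAIT and wake any thread sleeping in wait_on_bit() on that
   bit (e.g. hold_null_lock() below). */
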
static void wake_up_ast(struct gdlm_lock *lp)
{
	clear_bit(LFL_AST_WAIT, &lp->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&lp->flags, LFL_AST_WAIT);
}

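/* Unlink the lock from the lockspace lists and free it. */
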
static void gdlm_delete_lp(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->delay_list))
		list_del_init(&lp->delay_list);
	gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
		    (unsigned long long)lp->lockname.ln_number);
	list_del_init(&lp->all_list);
	ls->all_locks_count--;
	spin_unlock(&ls->async_lock);

	kfree(lp);
}

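/* Locks on the delayed list sit out recovery (DFL_BLOCK_LOCKS) and are moved
   back to the submit queue by gdlm_submit_delayed() afterwards. */
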
static void gdlm_queue_delayed(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->delay_list, &ls->delayed);
	spin_unlock(&ls->async_lock);
}

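/* Completion (AST) processing for a dlm request, run from gdlm_ast().
   Decides whether the result is reported up to GFS, resubmitted to dlm, or
   consumed internally. */
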
static void process_complete(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	struct lm_async_cb acb;

	memset(&acb, 0, sizeof(acb));

	if (lp->lksb.sb_status == -DLM_ECANCEL) {
		log_info("complete dlm cancel %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);

		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		if (lp->cur == DLM_LOCK_IV)
			lp->lksb.sb_lkid = 0;
		goto out;
	}

	if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		if (lp->lksb.sb_status != -DLM_EUNLOCK) {
			log_info("unlock sb_status %d %x,%llx flags %lx",
				 lp->lksb.sb_status, lp->lockname.ln_type,
				 (unsigned long long)lp->lockname.ln_number,
				 lp->flags);
			return;
		}

		lp->cur = DLM_LOCK_IV;
		lp->req = DLM_LOCK_IV;
		lp->lksb.sb_lkid = 0;

		if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
			gdlm_delete_lp(lp);
			return;
		}
		goto out;
	}

	if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
		memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (lp->req == DLM_LOCK_PR)
			lp->req = DLM_LOCK_CW;
		else if (lp->req == DLM_LOCK_CW)
			lp->req = DLM_LOCK_PR;
	}

	/*
	 * A canceled lock request.  The lock was just taken off the delayed
	 * list and was never even submitted to dlm.
	 */

	if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
		log_info("complete internal cancel %x,%llx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number);
		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		goto out;
	}

	/*
	 * An error occurred.
	 */

	if (lp->lksb.sb_status) {
		/* a "normal" error */
		if ((lp->lksb.sb_status == -EAGAIN) &&
		    (lp->lkf & DLM_LKF_NOQUEUE)) {
			lp->req = lp->cur;
			if (lp->cur == DLM_LOCK_IV)
				lp->lksb.sb_lkid = 0;
			goto out;
		}

		/* this could only happen with cancels I think */
		log_info("ast sb_status %d %x,%llx flags %lx",
			 lp->lksb.sb_status, lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);
		return;
	}

	/*
	 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
	 */

	if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
		wake_up_ast(lp);
		return;
	}

	/*
	 * A lock has been demoted to NL because it initially completed during
	 * BLOCK_LOCKS.  Now it must be requested in the originally requested
	 * mode.
	 */

	if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
		gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);
		gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);

		lp->cur = DLM_LOCK_NL;
		lp->req = lp->prev_req;
		lp->prev_req = DLM_LOCK_IV;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		set_bit(LFL_NOCACHE, &lp->flags);

		if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
		    !test_bit(LFL_NOBLOCK, &lp->flags))
			gdlm_queue_delayed(lp);
		else
			queue_submit(lp);
		return;
	}

	/*
	 * A request is granted during dlm recovery.  It may be granted
	 * because the locks of a failed node were cleared.  In that case,
	 * there may be inconsistent data beneath this lock and we must wait
	 * for recovery to complete to use it.  When gfs recovery is done this
	 * granted lock will be converted to NL and then reacquired in this
	 * granted state.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) &&
	    lp->req != DLM_LOCK_NL) {
		lp->cur = lp->req;
		lp->prev_req = lp->req;
		lp->req = DLM_LOCK_NL;
		lp->lkf |= DLM_LKF_CONVERT;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		log_debug("rereq %x,%llx id %x %d,%d",
			  lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number,
			  lp->lksb.sb_lkid, lp->cur, lp->req);

		set_bit(LFL_REREQUEST, &lp->flags);
		queue_submit(lp);
		return;
	}

	/*
	 * DLM demoted the lock to NL before it was granted so GFS must be
	 * told it cannot cache data for this lock.
	 */

	if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
		set_bit(LFL_NOCACHE, &lp->flags);

out:
	/*
	 * This is an internal lock_dlm lock
	 */

	if (test_bit(LFL_INLOCK, &lp->flags)) {
		clear_bit(LFL_NOBLOCK, &lp->flags);
		lp->cur = lp->req;
		wake_up_ast(lp);
		return;
	}

	/*
	 * Normal completion of a lock request.  Tell GFS it now has the lock.
	 */

	clear_bit(LFL_NOBLOCK, &lp->flags);
	lp->cur = lp->req;
	acb.lc_name = lp->lockname;
	acb.lc_ret |= gdlm_make_lmstate(lp->cur);

	ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
}

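/* dlm completion callback: the request on lp has finished and its status is
   in lp->lksb.sb_status. */
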
static void gdlm_ast(void *astarg)
{
	struct gdlm_lock *lp = astarg;

	clear_bit(LFL_ACTIVE, &lp->flags);
	process_complete(lp);
}

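/* Blocking (BAST) processing: another node wants this lock in mode
   bast_mode, so ask GFS to release or demote it via the matching
   LM_CB_NEED_* callback. */
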
static void process_blocking(struct gdlm_lock *lp, int bast_mode)
{
	struct gdlm_ls *ls = lp->ls;
	unsigned int cb = 0;

	switch (gdlm_make_lmstate(bast_mode)) {
	case LM_ST_EXCLUSIVE:
		cb = LM_CB_NEED_E;
		break;
	case LM_ST_DEFERRED:
		cb = LM_CB_NEED_D;
		break;
	case LM_ST_SHARED:
		cb = LM_CB_NEED_S;
		break;
	default:
		gdlm_assert(0, "unknown bast mode %u", bast_mode);
	}

	ls->fscb(ls->sdp, cb, &lp->lockname);
}

static void gdlm_bast(void *astarg, int mode)
{
	struct gdlm_lock *lp = astarg;

	if (!mode) {
		printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
		       lp->lockname.ln_type,
		       (unsigned long long)lp->lockname.ln_number);
		return;
	}

	process_blocking(lp, mode);
}

/* convert gfs lock-state to dlm lock-mode */

static s16 make_mode(s16 lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	gdlm_assert(0, "unknown LM state %d", lmstate);
	return -1;
}

/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
   DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */

static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
{
	s16 cur = make_mode(cur_state);
	if (lp->cur != DLM_LOCK_IV)
		gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
}

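/* translate GFS request flags (LM_FLAG_*) and the current/requested modes
   into DLM_LKF_* flags for the dlm request */
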
static inline unsigned int make_flags(struct gdlm_lock *lp,
				      unsigned int gfs_flags,
				      s16 cur, s16 req)
{
	unsigned int lkf = 0;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
	}

	if (lp->lksb.sb_lkid != 0)
		lkf |= DLM_LKF_CONVERT;

	if (lp->lvb)
		lkf |= DLM_LKF_VALBLK;

	return lkf;
}

/* make_strname - convert GFS lock numbers to a string */

static inline void make_strname(const struct lm_lockname *lockname,
				struct gdlm_strname *str)
{
	sprintf(str->name, "%8x%16llx", lockname->ln_type,
		(unsigned long long)lockname->ln_number);
	str->namelen = GDLM_STRNAME_BYTES;
}

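/* allocate a gdlm_lock for lock name "name" and add it to the lockspace's
   list of all locks */
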
static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
			  struct gdlm_lock **lpp)
{
	struct gdlm_lock *lp;

	lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
	if (!lp)
		return -ENOMEM;

	lp->lockname = *name;
	make_strname(name, &lp->strname);
	lp->ls = ls;
	lp->cur = DLM_LOCK_IV;
	INIT_LIST_HEAD(&lp->delay_list);

	spin_lock(&ls->async_lock);
	list_add(&lp->all_list, &ls->all_locks);
	ls->all_locks_count++;
	spin_unlock(&ls->async_lock);

	*lpp = lp;
	return 0;
}

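/* lock-module entry points for creating and destroying a lock structure;
   no dlm request is made here */
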
int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
		  void **lockp)
{
	struct gdlm_lock *lp;
	int error;

	error = gdlm_create_lp(lockspace, name, &lp);

	*lockp = lp;
	return error;
}

void gdlm_put_lock(void *lock)
{
	gdlm_delete_lp(lock);
}

unsigned int gdlm_do_lock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	int error, bast = 1;

	/*
	 * When recovery is in progress, delay lock requests so they are
	 * submitted once recovery is done.  Requests for recovery (NOEXP)
	 * and unlocks can pass.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
		gdlm_queue_delayed(lp);
		return LM_OUT_ASYNC;
	}

	/*
	 * Submit the actual lock request.
	 */

	if (test_bit(LFL_NOBAST, &lp->flags))
		bast = 0;

	set_bit(LFL_ACTIVE, &lp->flags);

	log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
		  lp->cur, lp->req, lp->lkf);

	error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
			 lp->strname.name, lp->strname.namelen, 0, gdlm_ast,
			 lp, bast ? gdlm_bast : NULL);

	if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
		lp->lksb.sb_status = -EAGAIN;
		gdlm_ast(lp);
		error = 0;
	}

	if (error) {
		log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}

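/* issue the dlm unlock; completion arrives in gdlm_ast() with
   LFL_DLM_UNLOCK set and sb_status -DLM_EUNLOCK */
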
static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	unsigned int lkf = 0;
	int error;

	set_bit(LFL_DLM_UNLOCK, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	if (lp->lvb)
		lkf = DLM_LKF_VALBLK;

	log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number,
		  lp->lksb.sb_lkid, lp->cur, lkf);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);

	if (error) {
		log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}

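/* lock-module entry point: move the lock from cur_state to req_state;
   a request for LM_ST_UNLOCKED is routed to gdlm_unlock() */
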
unsigned int gdlm_lock(void *lock, unsigned int cur_state,
		       unsigned int req_state, unsigned int flags)
{
	struct gdlm_lock *lp = lock;

	if (req_state == LM_ST_UNLOCKED)
		return gdlm_unlock(lock, cur_state);

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	if (flags & LM_FLAG_NOEXP)
		set_bit(LFL_NOBLOCK, &lp->flags);

	check_cur_state(lp, cur_state);
	lp->req = make_mode(req_state);
	lp->lkf = make_flags(lp, flags, lp->cur, lp->req);

	return gdlm_do_lock(lp);
}

unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
{
	struct gdlm_lock *lp = lock;

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	if (lp->cur == DLM_LOCK_IV)
		return 0;
	return gdlm_do_unlock(lp);
}

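/* Cancel a pending request.  A request still sitting on the delayed list is
   completed here as canceled; one already blocked in the dlm is canceled
   with dlm_unlock(DLM_LKF_CANCEL). */
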
void gdlm_cancel(void *lock)
{
	struct gdlm_lock *lp = lock;
	struct gdlm_ls *ls = lp->ls;
	int error, delay_list = 0;

	if (test_bit(LFL_DLM_CANCEL, &lp->flags))
		return;

	log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->delay_list)) {
		list_del_init(&lp->delay_list);
		delay_list = 1;
	}
	spin_unlock(&ls->async_lock);

	if (delay_list) {
		set_bit(LFL_CANCEL, &lp->flags);
		set_bit(LFL_ACTIVE, &lp->flags);
		gdlm_ast(lp);
		return;
	}

	if (!test_bit(LFL_ACTIVE, &lp->flags) ||
	    test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		log_info("gdlm_cancel skip %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number, lp->flags);
		return;
	}

	/* the lock is blocked in the dlm */

	set_bit(LFL_DLM_CANCEL, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
			   NULL, lp);

	log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
		 lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	if (error == -EBUSY)
		clear_bit(LFL_DLM_CANCEL, &lp->flags);
}

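/* allocate and attach, or detach and free, the lock value block for lp */
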
static int gdlm_add_lvb(struct gdlm_lock *lp)
{
	char *lvb;

	lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
	if (!lvb)
		return -ENOMEM;

	lp->lksb.sb_lvbptr = lvb;
	lp->lvb = lvb;
	return 0;
}

static void gdlm_del_lvb(struct gdlm_lock *lp)
{
	kfree(lp->lvb);
	lp->lvb = NULL;
	lp->lksb.sb_lvbptr = NULL;
}

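/* wait_on_bit() action: sleep until wake_up_ast() clears LFL_AST_WAIT */
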
static int gdlm_ast_wait(void *word)
{
	schedule();
	return 0;
}

/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs won't call hold_lvb() during a callback (from
   the context of a lock_dlm thread). */

static int hold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = NULL;
	int error;

	if (lp->hold_null) {
		printk(KERN_INFO "lock_dlm: lvb already held\n");
		return 0;
	}

	error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
	if (error)
		goto out;

	lpn->lksb.sb_lvbptr = junk_lvb;
	lpn->lvb = junk_lvb;

	lpn->req = DLM_LOCK_NL;
	lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
	set_bit(LFL_NOBAST, &lpn->flags);
	set_bit(LFL_INLOCK, &lpn->flags);
	set_bit(LFL_AST_WAIT, &lpn->flags);

	gdlm_do_lock(lpn);
	wait_on_bit(&lpn->flags, LFL_AST_WAIT, gdlm_ast_wait,
		    TASK_UNINTERRUPTIBLE);
	error = lpn->lksb.sb_status;
	if (error) {
		printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
		       error);
		gdlm_delete_lp(lpn);
		lpn = NULL;
	}
out:
	lp->hold_null = lpn;
	return error;
}

/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs may call unhold_lvb() during a callback (from
   the context of a lock_dlm thread) which could cause a deadlock since the
   other lock_dlm thread could be engaged in recovery. */

static void unhold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = lp->hold_null;

	gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
		    (unsigned long long)lp->lockname.ln_number);
	lpn->lksb.sb_lvbptr = NULL;
	lpn->lvb = NULL;
	set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
	gdlm_do_unlock(lpn);
	lp->hold_null = NULL;
}

/* Acquire a NL lock because gfs requires the value block to remain
   intact on the resource while the lvb is "held" even if it's holding no locks
   on the resource. */

int gdlm_hold_lvb(void *lock, char **lvbp)
{
	struct gdlm_lock *lp = lock;
	int error;

	error = gdlm_add_lvb(lp);
	if (error)
		return error;

	*lvbp = lp->lvb;

	error = hold_null_lock(lp);
	if (error)
		gdlm_del_lvb(lp);

	return error;
}

void gdlm_unhold_lvb(void *lock, char *lvb)
{
	struct gdlm_lock *lp = lock;

	unhold_null_lock(lp);
	gdlm_del_lvb(lp);
}

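/* Move all delayed lock requests onto the submit queue and wake the
   lock_dlm thread to resubmit them. */
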
void gdlm_submit_delayed(struct gdlm_ls *ls)
{
	struct gdlm_lock *lp, *safe;

	spin_lock(&ls->async_lock);
	list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
		list_del_init(&lp->delay_list);
		list_add_tail(&lp->delay_list, &ls->submit);
	}
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}

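/* Free every remaining lock in the lockspace (the static junk_lvb is not
   kfree'd) and return the number freed. */
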
int gdlm_release_all_locks(struct gdlm_ls *ls)
{
	struct gdlm_lock *lp, *safe;
	int count = 0;

	spin_lock(&ls->async_lock);
	list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
		list_del_init(&lp->all_list);

		if (lp->lvb && lp->lvb != junk_lvb)
			kfree(lp->lvb);
		kfree(lp);
		count++;
	}
	spin_unlock(&ls->async_lock);

	return count;
}