/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"


/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They
 * need to abort if the lockspace is stopped, indicating a node has failed
 * (perhaps the one being waited for).
 */

/*
 * Wait until the given function returns non-zero or the lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, it should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timer to detect the result.  A timer wakes us up periodically while waiting
 * to see if we should abort due to a node failure.  This should only be
 * called by the dlm_recoverd thread.
 */

static void dlm_wait_timer_fn(unsigned long data)
{
        struct dlm_ls *ls = (struct dlm_ls *) data;
        mod_timer(&ls->ls_timer, jiffies + (dlm_config.ci_recover_timer * HZ));
        wake_up(&ls->ls_wait_general);
}

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
        int error = 0;

        init_timer(&ls->ls_timer);
        ls->ls_timer.function = dlm_wait_timer_fn;
        ls->ls_timer.data = (long) ls;
        ls->ls_timer.expires = jiffies + (dlm_config.ci_recover_timer * HZ);
        add_timer(&ls->ls_timer);

        wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
        del_timer_sync(&ls->ls_timer);

        if (dlm_recovery_stopped(ls)) {
                log_debug(ls, "dlm_wait_function aborted");
                error = -EINTR;
        }
        return error;
}
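
/*
 * Example (illustrative sketch, not part of the DLM API): a caller supplies
 * a predicate on the lockspace and blocks until it holds or recovery stops:
 *
 *	static int my_step_done(struct dlm_ls *ls)	// hypothetical name
 *	{
 *		return recover_list_empty(ls);
 *	}
 *	...
 *	error = dlm_wait_function(ls, &my_step_done);
 *
 * dlm_recover_masters() and dlm_recover_locks() below use exactly this
 * pattern with recover_list_empty() as the predicate.
 */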

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */

uint32_t dlm_recover_status(struct dlm_ls *ls)
{
        uint32_t status;

        spin_lock(&ls->ls_recover_lock);
        status = ls->ls_recover_status;
        spin_unlock(&ls->ls_recover_lock);
        return status;
}

static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
        ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
        spin_lock(&ls->ls_recover_lock);
        _set_recover_status(ls, status);
        spin_unlock(&ls->ls_recover_lock);
}

static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
                           int save_slots)
{
        struct dlm_rcom *rc = ls->ls_recover_buf;
        struct dlm_member *memb;
        int error = 0, delay;

        list_for_each_entry(memb, &ls->ls_nodes, list) {
                delay = 0;
                for (;;) {
                        if (dlm_recovery_stopped(ls)) {
                                error = -EINTR;
                                goto out;
                        }

                        error = dlm_rcom_status(ls, memb->nodeid, 0);
                        if (error)
                                goto out;

                        if (save_slots)
                                dlm_slot_save(ls, rc, memb);

                        if (rc->rc_result & wait_status)
                                break;
                        if (delay < 1000)
                                delay += 20;
                        msleep(delay);
                }
        }
 out:
        return error;
}

static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
                           uint32_t status_flags)
{
        struct dlm_rcom *rc = ls->ls_recover_buf;
        int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

        for (;;) {
                if (dlm_recovery_stopped(ls)) {
                        error = -EINTR;
                        goto out;
                }

                error = dlm_rcom_status(ls, nodeid, status_flags);
                if (error)
                        break;

                if (rc->rc_result & wait_status)
                        break;
                if (delay < 1000)
                        delay += 20;
                msleep(delay);
        }
 out:
        return error;
}

static int wait_status(struct dlm_ls *ls, uint32_t status)
{
        uint32_t status_all = status << 1;
        int error;

        if (ls->ls_low_nodeid == dlm_our_nodeid()) {
                error = wait_status_all(ls, status, 0);
                if (!error)
                        dlm_set_recover_status(ls, status_all);
        } else
                error = wait_status_low(ls, status_all, 0);

        return error;
}
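
/*
 * Worked example of the X -> X_ALL convention (flag values assumed from
 * dlm_internal.h, where each _ALL flag is the base flag shifted left by one,
 * e.g. DLM_RS_DIR 0x04 and DLM_RS_DIR_ALL 0x08):
 *
 *	wait_status(ls, DLM_RS_DIR);
 *
 * On the low nodeid this polls every member until each reports DLM_RS_DIR,
 * then sets DLM_RS_DIR_ALL (0x04 << 1).  Every other node instead polls the
 * low nodeid until it reports DLM_RS_DIR_ALL.
 */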

int dlm_recover_members_wait(struct dlm_ls *ls)
{
        struct dlm_member *memb;
        struct dlm_slot *slots;
        int num_slots, slots_size;
        int error, rv;
        uint32_t gen;

        list_for_each_entry(memb, &ls->ls_nodes, list) {
                memb->slot = -1;
                memb->generation = 0;
        }

        if (ls->ls_low_nodeid == dlm_our_nodeid()) {
                error = wait_status_all(ls, DLM_RS_NODES, 1);
                if (error)
                        goto out;

                /* slots array is sparse, slots_size may be > num_slots */

                rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
                if (!rv) {
                        spin_lock(&ls->ls_recover_lock);
                        _set_recover_status(ls, DLM_RS_NODES_ALL);
                        ls->ls_num_slots = num_slots;
                        ls->ls_slots_size = slots_size;
                        ls->ls_slots = slots;
                        ls->ls_generation = gen;
                        spin_unlock(&ls->ls_recover_lock);
                } else {
                        dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
                }
        } else {
                error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
                if (error)
                        goto out;

                dlm_slots_copy_in(ls);
        }
 out:
        return error;
}
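
/*
 * Illustrative timeline for the slot exchange above (assuming a three-node
 * lockspace where nodeid 1 is the low node):
 *
 *	node 1:     wait_status_all(DLM_RS_NODES, 1)
 *	            polls nodes 2 and 3, saving each node's slot info
 *	node 1:     dlm_slots_assign() picks slots and a new generation
 *	node 1:     sets DLM_RS_NODES_ALL
 *	nodes 2,3:  wait_status_low(DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS)
 *	            the NEED_SLOTS flag asks node 1 to include slot info
 *	            in its status reply
 *	nodes 2,3:  dlm_slots_copy_in() adopts the slots from that reply
 */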

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */

static int recover_list_empty(struct dlm_ls *ls)
{
        int empty;

        spin_lock(&ls->ls_recover_list_lock);
        empty = list_empty(&ls->ls_recover_list);
        spin_unlock(&ls->ls_recover_list_lock);

        return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;

        spin_lock(&ls->ls_recover_list_lock);
        if (list_empty(&r->res_recover_list)) {
                list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
                ls->ls_recover_list_count++;
                dlm_hold_rsb(r);
        }
        spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;

        spin_lock(&ls->ls_recover_list_lock);
        list_del_init(&r->res_recover_list);
        ls->ls_recover_list_count--;
        spin_unlock(&ls->ls_recover_list_lock);

        dlm_put_rsb(r);
}

static struct dlm_rsb *recover_list_find(struct dlm_ls *ls, uint64_t id)
{
        struct dlm_rsb *r = NULL;

        spin_lock(&ls->ls_recover_list_lock);

        list_for_each_entry(r, &ls->ls_recover_list, res_recover_list) {
                if (id == (unsigned long) r)
                        goto out;
        }
        r = NULL;
 out:
        spin_unlock(&ls->ls_recover_list_lock);
        return r;
}
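
/*
 * Sketch of the address-as-id round trip described above (the send-side
 * assignment lives in rcom.c; shown here only for illustration):
 *
 *	send side:   rc->rc_id = (unsigned long) r;
 *	reply side:  r = recover_list_find(ls, rc->rc_id);
 *
 * The id is never dereferenced on the remote node; it is echoed back
 * verbatim, so a simple pointer-value comparison is enough to match the
 * reply with the rsb still held on the recover_list.
 */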

static void recover_list_clear(struct dlm_ls *ls)
{
        struct dlm_rsb *r, *s;

        spin_lock(&ls->ls_recover_list_lock);
        list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
                list_del_init(&r->res_recover_list);
                r->res_recover_locks_count = 0;
                dlm_put_rsb(r);
                ls->ls_recover_list_count--;
        }

        if (ls->ls_recover_list_count != 0) {
                log_error(ls, "warning: recover_list_count %d",
                          ls->ls_recover_list_count);
                ls->ls_recover_list_count = 0;
        }
        spin_unlock(&ls->ls_recover_list_lock);
}

/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_dir_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue.
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
        struct dlm_lkb *lkb;

        list_for_each_entry(lkb, queue, lkb_statequeue) {
                if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) {
                        lkb->lkb_nodeid = nodeid;
                        lkb->lkb_remid = 0;
                }
        }
}

static void set_master_lkbs(struct dlm_rsb *r)
{
        set_lock_master(&r->res_grantqueue, r->res_nodeid);
        set_lock_master(&r->res_convertqueue, r->res_nodeid);
        set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks.
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r, int nodeid)
{
        r->res_nodeid = nodeid;
        set_master_lkbs(r);
        rsb_set_flag(r, RSB_NEW_MASTER);
        rsb_set_flag(r, RSB_NEW_MASTER2);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 */

static int recover_master(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;
        int error, ret_nodeid;
        int our_nodeid = dlm_our_nodeid();
        int dir_nodeid = dlm_dir_nodeid(r);

        if (dir_nodeid == our_nodeid) {
                error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
                                       r->res_length, &ret_nodeid);
                if (error)
                        log_error(ls, "recover dir lookup error %d", error);

                if (ret_nodeid == our_nodeid)
                        ret_nodeid = 0;
                lock_rsb(r);
                set_new_master(r, ret_nodeid);
                unlock_rsb(r);
        } else {
                recover_list_add(r);
                error = dlm_send_rcom_lookup(r, dir_nodeid);
        }

        return error;
}

/*
 * All MSTCPY locks are purged and rebuilt, even if the master stayed the
 * same.  This is necessary because recovery can be started, aborted and
 * restarted, causing the master nodeid to briefly change during the aborted
 * recovery, and change back to the original value in the second recovery.
 * The MSTCPY locks may or may not have been purged during the aborted
 * recovery.  Another node with an outstanding request on the waiters list
 * and a request reply saved in the requestqueue cannot know whether it
 * should ignore the reply and resend the request, or accept the reply and
 * complete the request.  It must do the former if the remote node purged
 * MSTCPY locks, and it must do the latter if the remote node did not.  This
 * is solved by always purging MSTCPY locks, in which case the request reply
 * would always be ignored and the request resent.
 */

static int recover_master_static(struct dlm_rsb *r)
{
        int dir_nodeid = dlm_dir_nodeid(r);
        int new_master = dir_nodeid;

        if (dir_nodeid == dlm_our_nodeid())
                new_master = 0;

        lock_rsb(r);
        dlm_purge_mstcpy_locks(r);
        set_new_master(r, new_master);
        unlock_rsb(r);
        return 1;
}

/*
 * Go through local root resources and for each rsb which has a master which
 * has departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        int error = 0, count = 0;

        log_debug(ls, "dlm_recover_masters");

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                if (dlm_recovery_stopped(ls)) {
                        up_read(&ls->ls_root_sem);
                        error = -EINTR;
                        goto out;
                }

                if (dlm_no_directory(ls))
                        count += recover_master_static(r);
                else if (!is_master(r) &&
                         (dlm_is_removed(ls, r->res_nodeid) ||
                          rsb_flag(r, RSB_NEW_MASTER))) {
                        recover_master(r);
                        count++;
                }

                schedule();
        }
        up_read(&ls->ls_root_sem);

        log_debug(ls, "dlm_recover_masters %d resources", count);

        error = dlm_wait_function(ls, &recover_list_empty);
 out:
        if (error)
                recover_list_clear(ls);
        return error;
}

int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
        struct dlm_rsb *r;
        int nodeid;

        r = recover_list_find(ls, rc->rc_id);
        if (!r) {
                log_error(ls, "dlm_recover_master_reply no id %llx",
                          (unsigned long long)rc->rc_id);
                goto out;
        }

        nodeid = rc->rc_result;
        if (nodeid == dlm_our_nodeid())
                nodeid = 0;

        lock_rsb(r);
        set_new_master(r, nodeid);
        unlock_rsb(r);
        recover_list_del(r);

        if (recover_list_empty(ls))
                wake_up(&ls->ls_wait_general);
 out:
        return 0;
}

/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/

/*
 * keep a count of the number of lkb's we send to the new master; when we get
 * an equal number of replies then recovery for the rsb is done
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
        struct dlm_lkb *lkb;
        int error = 0;

        list_for_each_entry(lkb, head, lkb_statequeue) {
                error = dlm_send_rcom_lock(r, lkb);
                if (error)
                        break;
                r->res_recover_locks_count++;
        }

        return error;
}

static int recover_locks(struct dlm_rsb *r)
{
        int error = 0;

        lock_rsb(r);

        DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

        error = recover_locks_queue(r, &r->res_grantqueue);
        if (error)
                goto out;
        error = recover_locks_queue(r, &r->res_convertqueue);
        if (error)
                goto out;
        error = recover_locks_queue(r, &r->res_waitqueue);
        if (error)
                goto out;

        if (r->res_recover_locks_count)
                recover_list_add(r);
        else
                rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
        unlock_rsb(r);
        return error;
}

int dlm_recover_locks(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        int error, count = 0;

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                if (is_master(r)) {
                        rsb_clear_flag(r, RSB_NEW_MASTER);
                        continue;
                }

                if (!rsb_flag(r, RSB_NEW_MASTER))
                        continue;

                if (dlm_recovery_stopped(ls)) {
                        error = -EINTR;
                        up_read(&ls->ls_root_sem);
                        goto out;
                }

                error = recover_locks(r);
                if (error) {
                        up_read(&ls->ls_root_sem);
                        goto out;
                }

                count += r->res_recover_locks_count;
        }
        up_read(&ls->ls_root_sem);

        log_debug(ls, "dlm_recover_locks %d out", count);

        error = dlm_wait_function(ls, &recover_list_empty);
 out:
        if (error)
                recover_list_clear(ls);
        return error;
}

void dlm_recovered_lock(struct dlm_rsb *r)
{
        DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

        r->res_recover_locks_count--;
        if (!r->res_recover_locks_count) {
                rsb_clear_flag(r, RSB_NEW_MASTER);
                recover_list_del(r);
        }

        if (recover_list_empty(r->res_ls))
                wake_up(&r->res_ls->ls_wait_general);
}
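
/*
 * Illustrative accounting for one remastered rsb (counts assumed; the
 * decrement above is driven by the reply path through
 * dlm_recover_process_copy()):
 *
 *	recover_locks(r)   sends 3 lkb's, res_recover_locks_count = 3,
 *	                   r goes on the recover_list
 *	reply for lkb A    dlm_recovered_lock(r), count = 2
 *	reply for lkb B    dlm_recovered_lock(r), count = 1
 *	reply for lkb C    dlm_recovered_lock(r), count = 0,
 *	                   NEW_MASTER cleared, r removed from the list;
 *	                   once the list is empty, ls_wait_general is woken
 *	                   and dlm_recover_locks() returns
 */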

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb.  If it
 * was already set prior to recovery, it's not cleared, regardless of locks.
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are
 * taken from the lkb with the largest lvb sequence number.
 */

static void recover_lvb(struct dlm_rsb *r)
{
        struct dlm_lkb *lkb, *high_lkb = NULL;
        uint32_t high_seq = 0;
        int lock_lvb_exists = 0;
        int big_lock_exists = 0;
        int lvblen = r->res_ls->ls_lvblen;

        list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
                if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                        continue;

                lock_lvb_exists = 1;

                if (lkb->lkb_grmode > DLM_LOCK_CR) {
                        big_lock_exists = 1;
                        goto setflag;
                }

                if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
                        high_lkb = lkb;
                        high_seq = lkb->lkb_lvbseq;
                }
        }

        list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
                if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                        continue;

                lock_lvb_exists = 1;

                if (lkb->lkb_grmode > DLM_LOCK_CR) {
                        big_lock_exists = 1;
                        goto setflag;
                }

                if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
                        high_lkb = lkb;
                        high_seq = lkb->lkb_lvbseq;
                }
        }

 setflag:
        if (!lock_lvb_exists)
                goto out;

        if (!big_lock_exists)
                rsb_set_flag(r, RSB_VALNOTVALID);

        /* don't mess with the lvb unless we're the new master */
        if (!rsb_flag(r, RSB_NEW_MASTER2))
                goto out;

        if (!r->res_lvbptr) {
                r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
                if (!r->res_lvbptr)
                        goto out;
        }

        if (big_lock_exists) {
                r->res_lvbseq = lkb->lkb_lvbseq;
                memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
        } else if (high_lkb) {
                r->res_lvbseq = high_lkb->lkb_lvbseq;
                memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
        } else {
                r->res_lvbseq = 0;
                memset(r->res_lvbptr, 0, lvblen);
        }
 out:
        return;
}
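
/*
 * Note on the sequence comparison above: casting both lvbseq values to int
 * and testing the signed difference makes the "largest sequence" choice
 * robust against uint32_t wraparound.  Worked example:
 *
 *	lvbseq = 0x00000002, high_seq = 0xfffffffe
 *	(int)0x00000002 - (int)0xfffffffe = 2 - (-2) = 4  (>= 0, so 0x2 wins)
 *
 * whereas a plain unsigned (lvbseq >= high_seq) comparison would pick the
 * stale 0xfffffffe.
 */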

/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
        struct dlm_lkb *lkb;
        int grmode = -1;

        list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
                if (lkb->lkb_grmode == DLM_LOCK_PR ||
                    lkb->lkb_grmode == DLM_LOCK_CW) {
                        grmode = lkb->lkb_grmode;
                        break;
                }
        }

        list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
                if (lkb->lkb_grmode != DLM_LOCK_IV)
                        continue;
                if (grmode == -1)
                        lkb->lkb_grmode = lkb->lkb_rqmode;
                else
                        lkb->lkb_grmode = grmode;
        }
}
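
/*
 * Worked example (modes assumed): a PR lock sits on the grant queue, and a
 * rebuilt lock converting PR->CW arrives on the convert queue with
 * lkb_grmode left at IV.  The scan above finds PR granted, so the converting
 * lkb gets lkb_grmode = PR, restoring the state "holds PR, wants CW".  If
 * neither PR nor CW is granted anywhere on the rsb, the lock is assumed to
 * have already been granted its requested mode (grmode = rqmode).
 */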

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_recover_grant() due to locks that may have
   existed from a removed node. */

static void recover_grant(struct dlm_rsb *r)
{
        if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
                rsb_set_flag(r, RSB_RECOVER_GRANT);
}

void dlm_recover_rsbs(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        unsigned int count = 0;

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                lock_rsb(r);
                if (is_master(r)) {
                        if (rsb_flag(r, RSB_RECOVER_CONVERT))
                                recover_conversion(r);
                        if (rsb_flag(r, RSB_NEW_MASTER2))
                                recover_grant(r);
                        recover_lvb(r);
                        count++;
                }
                rsb_clear_flag(r, RSB_RECOVER_CONVERT);
                rsb_clear_flag(r, RSB_NEW_MASTER2);
                unlock_rsb(r);
        }
        up_read(&ls->ls_root_sem);

        if (count)
                log_debug(ls, "dlm_recover_rsbs %d done", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
        struct rb_node *n;
        struct dlm_rsb *r;
        int i, error = 0;

        down_write(&ls->ls_root_sem);
        if (!list_empty(&ls->ls_root_list)) {
                log_error(ls, "root list not empty");
                error = -EINVAL;
                goto out;
        }

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                spin_lock(&ls->ls_rsbtbl[i].lock);
                for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
                        r = rb_entry(n, struct dlm_rsb, res_hashnode);
                        list_add(&r->res_root_list, &ls->ls_root_list);
                        dlm_hold_rsb(r);
                }

                /* If we're using a directory, add tossed rsbs to the root
                   list; they'll have entries created in the new directory,
                   but no other recovery steps should do anything with them. */

                if (dlm_no_directory(ls)) {
                        spin_unlock(&ls->ls_rsbtbl[i].lock);
                        continue;
                }

                for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = rb_next(n)) {
                        r = rb_entry(n, struct dlm_rsb, res_hashnode);
                        list_add(&r->res_root_list, &ls->ls_root_list);
                        dlm_hold_rsb(r);
                }
                spin_unlock(&ls->ls_rsbtbl[i].lock);
        }
 out:
        up_write(&ls->ls_root_sem);
        return error;
}

void dlm_release_root_list(struct dlm_ls *ls)
{
        struct dlm_rsb *r, *safe;

        down_write(&ls->ls_root_sem);
        list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
                list_del_init(&r->res_root_list);
                dlm_put_rsb(r);
        }
        up_write(&ls->ls_root_sem);
}

/* If not using a directory, clear the entire toss list; there's no benefit to
   caching the master value since it's fixed.  If we are using a dir, keep the
   rsb's we're the master of.  Recovery will add them to the root list and
   from there they'll be entered in the rebuilt directory. */

void dlm_clear_toss_list(struct dlm_ls *ls)
{
        struct rb_node *n, *next;
        struct dlm_rsb *rsb;
        int i;

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                spin_lock(&ls->ls_rsbtbl[i].lock);
                for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
                        next = rb_next(n);
                        rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
                        if (dlm_no_directory(ls) || !is_master(rsb)) {
                                rb_erase(n, &ls->ls_rsbtbl[i].toss);
                                dlm_free_rsb(rsb);
                        }
                }
                spin_unlock(&ls->ls_rsbtbl[i].lock);
        }
}