/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "cluster/masklog.h"

extern spinlock_t dlm_domain_lock;
extern struct list_head dlm_domains;

static int dlm_thread(void *data);

static void dlm_flush_asts(struct dlm_ctxt *dlm);

#define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num)

/* will exit holding res->spinlock, but may drop in function */
/* waits until flags are cleared on res->state */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
        DECLARE_WAITQUEUE(wait, current);

        assert_spin_locked(&res->spinlock);

        add_wait_queue(&res->wq, &wait);
repeat:
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (res->state & flags) {
                spin_unlock(&res->spinlock);
                schedule();
                spin_lock(&res->spinlock);
                goto repeat;
        }
        remove_wait_queue(&res->wq, &wait);
        current->state = TASK_RUNNING;
}
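
/* Returns 1 if the lockres has no locks on any of its queues (granted,
 * converting or blocked) and is not waiting on the dirty list, i.e. it
 * is a candidate for purging; returns 0 otherwise. */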
static int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
        if (list_empty(&res->granted) &&
            list_empty(&res->converting) &&
            list_empty(&res->blocked) &&
            list_empty(&res->dirty))
                return 1;

        return 0;
}
/* Call whenever you may have added or deleted something from one of
 * the lockres queues. This will figure out whether it belongs on the
 * unused list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                              struct dlm_lock_resource *res)
{
        mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&res->spinlock);

        if (__dlm_lockres_unused(res)){
                if (list_empty(&res->purge)) {
                        mlog(0, "putting lockres %.*s on purge list\n",
                             res->lockname.len, res->lockname.name);

                        res->last_used = jiffies;
                        list_add_tail(&res->purge, &dlm->purge_list);
                        dlm->purge_count++;
                }
        } else if (!list_empty(&res->purge)) {
                mlog(0, "removing lockres %.*s from purge list\n",
                     res->lockname.len, res->lockname.name);

                list_del_init(&res->purge);
                dlm->purge_count--;
        }
}
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
                            struct dlm_lock_resource *res)
{
        mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

        spin_lock(&dlm->spinlock);
        spin_lock(&res->spinlock);

        __dlm_lockres_calc_usage(dlm, res);

        spin_unlock(&res->spinlock);
        spin_unlock(&dlm->spinlock);
}
/* TODO: Eventual API: Called with the dlm spinlock held, may drop it
 * to do migration, but will re-acquire before exit. */
void dlm_purge_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *lockres)
{
        int master;
        int ret;

        spin_lock(&lockres->spinlock);
        master = lockres->owner == dlm->node_num;
        spin_unlock(&lockres->spinlock);

        mlog(0, "purging lockres %.*s, master = %d\n", lockres->lockname.len,
             lockres->lockname.name, master);

        /* Non master is the easy case -- no migration required, just
         * quit. */
        if (!master)
                goto finish;

        /* Wheee! Migrate lockres here! */
        spin_unlock(&dlm->spinlock);
again:
        ret = dlm_migrate_lockres(dlm, lockres, O2NM_MAX_NODES);
        if (ret == -ENOTEMPTY) {
                mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
                     lockres->lockname.len, lockres->lockname.name);

                BUG();
        } else if (ret < 0) {
                mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n",
                     lockres->lockname.len, lockres->lockname.name);
                goto again;
        }

        spin_lock(&dlm->spinlock);

finish:
        if (!list_empty(&lockres->purge)) {
                list_del_init(&lockres->purge);
                dlm->purge_count--;
        }
        __dlm_unhash_lockres(lockres);
}
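
/* Walk the purge list and get rid of unused lockres'. Unless purge_now
 * is set, a lockres is only purged once DLM_PURGE_INTERVAL_MS has
 * passed since its last_used time; since entries are added to the tail
 * in last_used order, the scan can stop at the first entry that is
 * still too young. At most dlm->purge_count entries are examined per
 * call. */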
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
                               int purge_now)
{
        unsigned int run_max, unused;
        unsigned long purge_jiffies;
        struct dlm_lock_resource *lockres;

        spin_lock(&dlm->spinlock);
        run_max = dlm->purge_count;

        while(run_max && !list_empty(&dlm->purge_list)) {
                run_max--;

                lockres = list_entry(dlm->purge_list.next,
                                     struct dlm_lock_resource, purge);

                /* Status of the lockres *might* change so double
                 * check. If the lockres is unused, holding the dlm
                 * spinlock will prevent people from getting any more
                 * refs on it -- there's no need to keep the lockres
                 * spinlock. */
                spin_lock(&lockres->spinlock);
                unused = __dlm_lockres_unused(lockres);
                spin_unlock(&lockres->spinlock);

                if (!unused)
                        continue;

                purge_jiffies = lockres->last_used +
                        msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

                /* Make sure that we want to be processing this guy at
                 * this time. */
                if (!purge_now && time_after(purge_jiffies, jiffies)) {
                        /* Since resources are added to the purge list
                         * in tail order, we can stop at the first
                         * unpurgeable resource -- anyone added after
                         * him will have a greater last_used value */
                        break;
                }

                list_del_init(&lockres->purge);
                dlm->purge_count--;

                /* This may drop and reacquire the dlm spinlock if it
                 * has to do migration. */
                mlog(0, "calling dlm_purge_lockres!\n");
                dlm_purge_lockres(dlm, lockres);
                mlog(0, "DONE calling dlm_purge_lockres!\n");

                /* Avoid adding any scheduling latencies */
                cond_resched_lock(&dlm->spinlock);
        }

        spin_unlock(&dlm->spinlock);
}
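
/* Work through the converting and blocked queues for this lockres,
 * granting whatever the already granted and converting locks allow.
 * Incompatible holders get a BAST queued (if one is not already
 * pending) and their highest_blocked raised, while grantable locks are
 * moved to the granted queue, have their lksb status set to DLM_NORMAL
 * and get an AST queued. Caller must hold res->spinlock, and the
 * lockres must not be migrating, recovering or in-progress. */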
static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
                              struct dlm_lock_resource *res)
{
        struct dlm_lock *lock, *target;
        struct list_head *iter;
        struct list_head *head;
        int can_grant = 1;

        //mlog(0, "res->lockname.len=%d\n", res->lockname.len);
        //mlog(0, "res->lockname.name=%p\n", res->lockname.name);
        //mlog(0, "shuffle res %.*s\n", res->lockname.len,
        //        res->lockname.name);

        /* because this function is called with the lockres
         * spinlock, and because we know that it is not migrating/
         * recovering/in-progress, it is fine to reserve asts and
         * basts right before queueing them all throughout */
        assert_spin_locked(&res->spinlock);
        BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
                              DLM_LOCK_RES_RECOVERING|
                              DLM_LOCK_RES_IN_PROGRESS)));

converting:
        if (list_empty(&res->converting))
                goto blocked;
        mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
             res->lockname.name);

        target = list_entry(res->converting.next, struct dlm_lock, list);
        if (target->ml.convert_type == LKM_IVMODE) {
                mlog(ML_ERROR, "%.*s: converting a lock with no "
                     "convert_type!\n", res->lockname.len, res->lockname.name);
                BUG();
        }
        head = &res->granted;
        list_for_each(iter, head) {
                lock = list_entry(iter, struct dlm_lock, list);
                if (lock==target)
                        continue;
                if (!dlm_lock_compatible(lock->ml.type,
                                         target->ml.convert_type)) {
                        can_grant = 0;
                        /* queue the BAST if not already */
                        if (lock->ml.highest_blocked == LKM_IVMODE) {
                                __dlm_lockres_reserve_ast(res);
                                dlm_queue_bast(dlm, lock);
                        }
                        /* update the highest_blocked if needed */
                        if (lock->ml.highest_blocked < target->ml.convert_type)
                                lock->ml.highest_blocked =
                                        target->ml.convert_type;
                }
        }
        head = &res->converting;
        list_for_each(iter, head) {
                lock = list_entry(iter, struct dlm_lock, list);
                if (lock==target)
                        continue;
                if (!dlm_lock_compatible(lock->ml.type,
                                         target->ml.convert_type)) {
                        can_grant = 0;
                        if (lock->ml.highest_blocked == LKM_IVMODE) {
                                __dlm_lockres_reserve_ast(res);
                                dlm_queue_bast(dlm, lock);
                        }
                        if (lock->ml.highest_blocked < target->ml.convert_type)
                                lock->ml.highest_blocked =
                                        target->ml.convert_type;
                }
        }

        /* we can convert the lock */
        if (can_grant) {
                spin_lock(&target->spinlock);
                BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

                mlog(0, "calling ast for converting lock: %.*s, have: %d, "
                     "granting: %d, node: %u\n", res->lockname.len,
                     res->lockname.name, target->ml.type,
                     target->ml.convert_type, target->ml.node);

                target->ml.type = target->ml.convert_type;
                target->ml.convert_type = LKM_IVMODE;
                list_del_init(&target->list);
                list_add_tail(&target->list, &res->granted);

                BUG_ON(!target->lksb);
                target->lksb->status = DLM_NORMAL;

                spin_unlock(&target->spinlock);

                __dlm_lockres_reserve_ast(res);
                dlm_queue_ast(dlm, target);
                /* go back and check for more */
                goto converting;
        }

blocked:
        if (list_empty(&res->blocked))
                goto leave;
        target = list_entry(res->blocked.next, struct dlm_lock, list);

        head = &res->granted;
        list_for_each(iter, head) {
                lock = list_entry(iter, struct dlm_lock, list);
                if (lock==target)
                        continue;
                if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
                        can_grant = 0;
                        if (lock->ml.highest_blocked == LKM_IVMODE) {
                                __dlm_lockres_reserve_ast(res);
                                dlm_queue_bast(dlm, lock);
                        }
                        if (lock->ml.highest_blocked < target->ml.type)
                                lock->ml.highest_blocked = target->ml.type;
                }
        }

        head = &res->converting;
        list_for_each(iter, head) {
                lock = list_entry(iter, struct dlm_lock, list);
                if (lock==target)
                        continue;
                if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
                        can_grant = 0;
                        if (lock->ml.highest_blocked == LKM_IVMODE) {
                                __dlm_lockres_reserve_ast(res);
                                dlm_queue_bast(dlm, lock);
                        }
                        if (lock->ml.highest_blocked < target->ml.type)
                                lock->ml.highest_blocked = target->ml.type;
                }
        }

        /* we can grant the blocked lock (only
         * possible if converting list empty) */
        if (can_grant) {
                spin_lock(&target->spinlock);
                BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

                mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
                     "node: %u\n", res->lockname.len, res->lockname.name,
                     target->ml.type, target->ml.node);

                // target->ml.type is already correct
                list_del_init(&target->list);
                list_add_tail(&target->list, &res->granted);

                BUG_ON(!target->lksb);
                target->lksb->status = DLM_NORMAL;

                spin_unlock(&target->spinlock);

                __dlm_lockres_reserve_ast(res);
                dlm_queue_ast(dlm, target);
                /* go back and check for more */
                goto converting;
        }

leave:
        return;
}
/* must have NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
        mlog_entry("dlm=%p, res=%p\n", dlm, res);

        if (res) {
                spin_lock(&dlm->spinlock);
                spin_lock(&res->spinlock);
                __dlm_dirty_lockres(dlm, res);
                spin_unlock(&res->spinlock);
                spin_unlock(&dlm->spinlock);
        }

        wake_up(&dlm->dlm_thread_wq);
}
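
/* Put a locally mastered lockres on the dirty list so that dlm_thread
 * will shuffle its queues; lockres' mastered on another node are never
 * added. Caller must hold dlm->spinlock and res->spinlock. */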
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
        mlog_entry("dlm=%p, res=%p\n", dlm, res);

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&res->spinlock);

        /* don't shuffle secondary queues */
        if ((res->owner == dlm->node_num) &&
            !(res->state & DLM_LOCK_RES_DIRTY)) {
                list_add_tail(&res->dirty, &dlm->dirty_list);
                res->state |= DLM_LOCK_RES_DIRTY;
        }
}
/* Launch the dlm thread for this domain */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
        mlog(0, "starting dlm thread...\n");

        dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
        if (IS_ERR(dlm->dlm_thread_task)) {
                mlog_errno(PTR_ERR(dlm->dlm_thread_task));
                dlm->dlm_thread_task = NULL;
                return -EINVAL;
        }

        return 0;
}

void dlm_complete_thread(struct dlm_ctxt *dlm)
{
        if (dlm->dlm_thread_task) {
                mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
                kthread_stop(dlm->dlm_thread_task);
                dlm->dlm_thread_task = NULL;
        }
}
static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
        int empty;

        spin_lock(&dlm->spinlock);
        empty = list_empty(&dlm->dirty_list);
        spin_unlock(&dlm->spinlock);

        return empty;
}
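
/* Deliver every pending AST and BAST: each one is run locally if this
 * node owns the lock, otherwise it is sent to the lock's node. The
 * list reference on the lock and the ast reservation on the lockres
 * taken when the ast/bast was queued are dropped once delivery is
 * done. */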
static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
        int ret;
        struct dlm_lock *lock;
        struct dlm_lock_resource *res;
        u8 hi;

        spin_lock(&dlm->ast_lock);
        while (!list_empty(&dlm->pending_asts)) {
                lock = list_entry(dlm->pending_asts.next,
                                  struct dlm_lock, ast_list);
                /* get an extra ref on lock */
                dlm_lock_get(lock);
                res = lock->lockres;
                mlog(0, "delivering an ast for this lockres\n");

                BUG_ON(!lock->ast_pending);

                /* remove from list (including ref) */
                list_del_init(&lock->ast_list);
                dlm_lock_put(lock);
                spin_unlock(&dlm->ast_lock);

                if (lock->ml.node != dlm->node_num) {
                        ret = dlm_do_remote_ast(dlm, res, lock);
                        if (ret < 0)
                                mlog_errno(ret);
                } else
                        dlm_do_local_ast(dlm, res, lock);

                spin_lock(&dlm->ast_lock);

                /* possible that another ast was queued while
                 * we were delivering the last one */
                if (!list_empty(&lock->ast_list)) {
                        mlog(0, "aha another ast got queued while "
                             "we were finishing the last one. will "
                             "keep the ast_pending flag set.\n");
                } else
                        lock->ast_pending = 0;

                /* drop the extra ref.
                 * this may drop it completely. */
                dlm_lock_put(lock);
                dlm_lockres_release_ast(dlm, res);
        }

        while (!list_empty(&dlm->pending_basts)) {
                lock = list_entry(dlm->pending_basts.next,
                                  struct dlm_lock, bast_list);
                /* get an extra ref on lock */
                dlm_lock_get(lock);
                res = lock->lockres;

                BUG_ON(!lock->bast_pending);

                /* get the highest blocked lock, and reset */
                spin_lock(&lock->spinlock);
                BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
                hi = lock->ml.highest_blocked;
                lock->ml.highest_blocked = LKM_IVMODE;
                spin_unlock(&lock->spinlock);

                /* remove from list (including ref) */
                list_del_init(&lock->bast_list);
                dlm_lock_put(lock);
                spin_unlock(&dlm->ast_lock);
                mlog(0, "delivering a bast for this lockres "
                     "(blocked = %d)\n", hi);
                if (lock->ml.node != dlm->node_num) {
                        ret = dlm_send_proxy_bast(dlm, res, lock, hi);
                        if (ret < 0)
                                mlog_errno(ret);
                } else
                        dlm_do_local_bast(dlm, res, lock, hi);

                spin_lock(&dlm->ast_lock);

                /* possible that another bast was queued while
                 * we were delivering the last one */
                if (!list_empty(&lock->bast_list)) {
                        mlog(0, "aha another bast got queued while "
                             "we were finishing the last one. will "
                             "keep the bast_pending flag set.\n");
                } else
                        lock->bast_pending = 0;

                /* drop the extra ref.
                 * this may drop it completely. */
                dlm_lock_put(lock);
                dlm_lockres_release_ast(dlm, res);
        }

        wake_up(&dlm->ast_wq);
        spin_unlock(&dlm->ast_lock);
}
#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY 100
#define DLM_THREAD_MAX_ASTS 10
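
/* Main worker thread, one per dlm domain. Until kthread_should_stop()
 * it repeatedly runs the purge list, shuffles up to
 * DLM_THREAD_MAX_DIRTY dirty lockres' per pass, flushes pending
 * ASTs/BASTs, and then sleeps for up to DLM_THREAD_TIMEOUT_MS or until
 * it is kicked via dlm->dlm_thread_wq. */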
static int dlm_thread(void *data)
{
        struct dlm_lock_resource *res;
        struct dlm_ctxt *dlm = data;
        unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

        mlog(0, "dlm thread running for %s...\n", dlm->name);

        while (!kthread_should_stop()) {
                int n = DLM_THREAD_MAX_DIRTY;

                /* dlm_shutting_down is very point-in-time, but that
                 * doesn't matter as we'll just loop back around if we
                 * get false on the leading edge of a state
                 * transition. */
                dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

                /* We really don't want to hold dlm->spinlock while
                 * calling dlm_shuffle_lists on each lockres that
                 * needs to have its queues adjusted and AST/BASTs
                 * run. So let's pull each entry off the dirty_list
                 * and drop dlm->spinlock ASAP. Once off the list,
                 * res->spinlock needs to be taken again to protect
                 * the queues while calling dlm_shuffle_lists. */
                spin_lock(&dlm->spinlock);
                while (!list_empty(&dlm->dirty_list)) {
                        int delay = 0;
                        res = list_entry(dlm->dirty_list.next,
                                         struct dlm_lock_resource, dirty);

                        /* peel a lockres off, remove it from the list,
                         * unset the dirty flag and drop the dlm lock */
                        BUG_ON(!res);
                        dlm_lockres_get(res);

                        spin_lock(&res->spinlock);
                        res->state &= ~DLM_LOCK_RES_DIRTY;
                        list_del_init(&res->dirty);
                        spin_unlock(&res->spinlock);
                        spin_unlock(&dlm->spinlock);

                        /* lockres can be re-dirtied/re-added to the
                         * dirty_list in this gap, but that is ok */

                        spin_lock(&res->spinlock);
                        if (res->owner != dlm->node_num) {
                                __dlm_print_one_lock_resource(res);
                                mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
                                     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
                                     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
                                     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
                                     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
                        }
                        BUG_ON(res->owner != dlm->node_num);

                        /* it is now ok to move lockreses in these states
                         * to the dirty list, assuming that they will only be
                         * dirty for a short while. */
                        if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
                                          DLM_LOCK_RES_MIGRATING |
                                          DLM_LOCK_RES_RECOVERING)) {
                                /* move it to the tail and keep going */
                                spin_unlock(&res->spinlock);
                                mlog(0, "delaying list shuffling for in-"
                                     "progress lockres %.*s, state=%d\n",
                                     res->lockname.len, res->lockname.name,
                                     res->state);
                                delay = 1;
                                goto in_progress;
                        }

                        /* at this point the lockres is not migrating/
                         * recovering/in-progress. we have the lockres
                         * spinlock and do NOT have the dlm lock.
                         * safe to reserve/queue asts and run the lists. */

                        mlog(0, "calling dlm_shuffle_lists with dlm=%p, "
                             "res=%p\n", dlm, res);

                        /* called while holding lockres lock */
                        dlm_shuffle_lists(dlm, res);
                        spin_unlock(&res->spinlock);

                        dlm_lockres_calc_usage(dlm, res);

in_progress:

                        spin_lock(&dlm->spinlock);
                        /* if the lock was in-progress, stick
                         * it on the back of the list */
                        if (delay) {
                                spin_lock(&res->spinlock);
                                list_add_tail(&res->dirty, &dlm->dirty_list);
                                res->state |= DLM_LOCK_RES_DIRTY;
                                spin_unlock(&res->spinlock);
                        }
                        dlm_lockres_put(res);

                        /* unlikely, but we may need to give time to
                         * other tasks */
                        if (!--n) {
                                mlog(0, "throttling dlm_thread\n");
                                break;
                        }
                }

                spin_unlock(&dlm->spinlock);
                dlm_flush_asts(dlm);

                /* yield and continue right away if there is more work to do */
                if (!n) {
                        yield();
                        continue;
                }

                wait_event_interruptible_timeout(dlm->dlm_thread_wq,
                                                 !dlm_dirty_list_empty(dlm) ||
                                                 kthread_should_stop(),
                                                 timeout);
        }

        mlog(0, "quitting DLM thread\n");
        return 0;
}