svclock.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911
  1. /*
  2. * linux/fs/lockd/svclock.c
  3. *
  4. * Handling of server-side locks, mostly of the blocked variety.
  5. * This is the ugliest part of lockd because we tread on very thin ice.
  6. * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
  7. * IMNSHO introducing the grant callback into the NLM protocol was one
  8. * of the worst ideas Sun ever had. Except maybe for the idea of doing
  9. * NFS file locking at all.
  10. *
  11. * I'm trying hard to avoid race conditions by protecting most accesses
  12. * to a file's list of blocked locks through a semaphore. The global
  13. * list of blocked locks is not protected in this fashion however.
  14. * Therefore, some functions (such as the RPC callback for the async grant
  15. * call) move blocked locks towards the head of the list *while some other
  16. * process might be traversing it*. This should not be a problem in
  17. * practice, because this will only cause functions traversing the list
  18. * to visit some blocks twice.
  19. *
  20. * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
  21. */
  22. #include <linux/types.h>
  23. #include <linux/errno.h>
  24. #include <linux/kernel.h>
  25. #include <linux/sched.h>
  26. #include <linux/smp_lock.h>
  27. #include <linux/sunrpc/clnt.h>
  28. #include <linux/sunrpc/svc.h>
  29. #include <linux/lockd/nlm.h>
  30. #include <linux/lockd/lockd.h>
  31. #include <linux/kthread.h>
  32. #define NLMDBG_FACILITY NLMDBG_SVCLOCK
  33. #ifdef CONFIG_LOCKD_V4
  34. #define nlm_deadlock nlm4_deadlock
  35. #else
  36. #define nlm_deadlock nlm_lck_denied
  37. #endif
  38. static void nlmsvc_release_block(struct nlm_block *block);
  39. static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
  40. static void nlmsvc_remove_block(struct nlm_block *block);
  41. static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
  42. static void nlmsvc_freegrantargs(struct nlm_rqst *call);
  43. static const struct rpc_call_ops nlmsvc_grant_ops;
  44. /*
  45. * The list of blocked locks to retry
  46. */
  47. static LIST_HEAD(nlm_blocked);
/*
 * Insert a blocked lock into the global list, ordered by wakeup time.
 *
 * @when is a relative timeout in jiffies, or NLM_NEVER for "no retry".
 * If the block is not yet on nlm_blocked, the list takes its own
 * reference; if it is already queued, it is re-inserted at the new
 * position and the existing reference is kept.
 */
static void
nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (list_empty(&block->b_list)) {
		/* Not currently queued: the list holds a reference */
		kref_get(&block->b_count);
	} else {
		/* Already queued: unlink so we can re-insert in order */
		list_del_init(&block->b_list);
	}
	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		/* Convert relative timeout to absolute jiffies, taking
		 * care not to collide with the reserved NLM_NEVER value */
		if ((when += jiffies) == NLM_NEVER)
			when ++;
		/* Find the first entry that wakes up later than us */
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when,when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}
	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}
  78. /*
  79. * Remove a block from the global list
  80. */
  81. static inline void
  82. nlmsvc_remove_block(struct nlm_block *block)
  83. {
  84. if (!list_empty(&block->b_list)) {
  85. list_del_init(&block->b_list);
  86. nlmsvc_release_block(block);
  87. }
  88. }
/*
 * Find a block for a given lock.
 *
 * Walks the global nlm_blocked list for a block on the same nlm_file
 * whose lock matches @lock (per nlm_compare_locks). On success, takes
 * a reference on the block and returns it; the caller must drop it
 * with nlmsvc_release_block(). Returns NULL if no match is found.
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	struct file_lock *fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
			file, lock->fl.fl_pid,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end, lock->fl.fl_type);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->fl_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->fl_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			return block;
		}
	}
	return NULL;
}
  115. static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
  116. {
  117. if(a->len != b->len)
  118. return 0;
  119. if(memcmp(a->data,b->data,a->len))
  120. return 0;
  121. return 1;
  122. }
  123. /*
  124. * Find a block with a given NLM cookie.
  125. */
  126. static inline struct nlm_block *
  127. nlmsvc_find_block(struct nlm_cookie *cookie)
  128. {
  129. struct nlm_block *block;
  130. list_for_each_entry(block, &nlm_blocked, b_list) {
  131. if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
  132. goto found;
  133. }
  134. return NULL;
  135. found:
  136. dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
  137. kref_get(&block->b_count);
  138. return block;
  139. }
/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards comittees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 *
 * On success the block holds: a reference on @host (the caller's
 * reference is consumed), an incremented file->f_count, and the
 * GRANTED callback rqst in b_call. On failure the call (and with it
 * the host reference) is released and NULL is returned.
 */
static struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
		struct nlm_file *file, struct nlm_lock *lock,
		struct nlm_cookie *cookie)
{
	struct nlm_block *block;
	struct nlm_rqst *call = NULL;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	/* Copies the lock and owner handle into the call; fails on OOM */
	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host = host;
	block->b_file = file;
	block->b_fl = NULL;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlm_release_call(call);
	return NULL;
}
/*
 * Delete a block.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 *
 * Removes the pending lock from the VFS wait queue
 * (posix_unblock_lock) and unlinks the block from the global
 * nlm_blocked list. Returns the status of posix_unblock_lock().
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;

	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list */
	status = posix_unblock_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}
/*
 * kref release function for nlm_block: runs when the last reference
 * is dropped. Unlinks the block from its file's list (under f_mutex),
 * frees the GRANTED call arguments, and drops the block's references
 * on the call, the file, and the deferred-conflict lock (b_fl).
 */
static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file *file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	mutex_lock(&file->f_mutex);
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlm_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block->b_fl);
	kfree(block);
}
  227. static void nlmsvc_release_block(struct nlm_block *block)
  228. {
  229. if (block != NULL)
  230. kref_put(&block->b_count, nlmsvc_free_block);
  231. }
/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 *
 * The scan restarts from the top every time a block is destroyed:
 * f_mutex must be dropped around nlmsvc_unlink_block(), and once it
 * is dropped the f_blocks list may have changed under us. A temporary
 * reference is taken so the block survives until we are done with it.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		kref_get(&block->b_count);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	mutex_unlock(&file->f_mutex);
}
/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 *
 * Copies the lock, file handle, and owner handle from @lock into the
 * call. Owner handles up to NLMCLNT_OHSIZE live in the call's builtin
 * a_owner buffer; larger ones are kmalloc'ed (and later released by
 * nlmsvc_freegrantargs). Returns 1 on success, 0 on allocation failure.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = lock->fl.fl_pid;

	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}
  280. static void nlmsvc_freegrantargs(struct nlm_rqst *call)
  281. {
  282. if (call->a_args.lock.oh.data != call->a_owner)
  283. kfree(call->a_args.lock.oh.data);
  284. }
/*
 * Deferred lock request handling for non-blocking lock
 *
 * Marks the block B_QUEUED, queues it for retry after NLM_TIMEOUT,
 * and asks the svc cache layer to defer the request. Returns
 * nlm_drop_reply if the deferral was set up (no reply is sent now),
 * otherwise nlm_lck_denied_nolocks.
 */
static __be32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	__be32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;

	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, ntohl(status));

	return status;
}
/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 *
 * Returns nlm_granted, nlm_lck_denied, nlm_lck_blocked,
 * nlm_lck_denied_nolocks, nlm_deadlock, or nlm_drop_reply (when the
 * request was deferred pending a filesystem callback).
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
			struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
{
	struct nlm_block *block = NULL;
	struct nlm_host *host;
	int error;
	__be32 ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
				file->f_file->f_path.dentry->d_inode->i_ino,
				lock->fl.fl_type, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);

	/* Create host handle for callback */
	host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
	if (host == NULL)
		return nlm_lck_denied_nolocks;

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		/* nlm_get_host: the block consumes its own host reference */
		block = nlmsvc_create_block(rqstp, nlm_get_host(host), file,
				lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		/* From here on, operate on the block's private copy */
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.fl_flags &= ~FL_SLEEP;

	if (block->b_flags & B_QUEUED) {
		/* A previously deferred request: report its outcome */
		dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n",
							block, block->b_flags);
		if (block->b_granted) {
			nlmsvc_unlink_block(block);
			ret = nlm_granted;
			goto out;
		}
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		/* Still pending: drop this (retransmitted) request */
		ret = nlm_drop_reply;
		goto out;
	}

	if (!wait)
		lock->fl.fl_flags &= ~FL_SLEEP;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;

	dprintk("lockd: vfs_lock_file returned %d\n", error);
	switch(error) {
		case 0:
			ret = nlm_granted;
			goto out;
		case -EAGAIN:
			/* Conflicting lock held: fall out of the switch and
			 * block below if the client asked to wait */
			ret = nlm_lck_denied;
			break;
		case -EINPROGRESS:
			if (wait)
				break;
			/* Filesystem lock operation is in progress
			   Add it to the queue waiting for callback */
			ret = nlmsvc_defer_lock_rqst(rqstp, block);
			goto out;
		case -EDEADLK:
			ret = nlm_deadlock;
			goto out;
		default:			/* includes ENOLCK */
			ret = nlm_lck_denied_nolocks;
			goto out;
	}

	ret = nlm_lck_denied;
	if (!wait)
		goto out;

	ret = nlm_lck_blocked;

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);
out:
	mutex_unlock(&file->f_mutex);
	nlmsvc_release_block(block);
	nlm_release_host(host);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}
/*
 * Test for presence of a conflicting lock.
 *
 * Returns nlm_granted if no conflict, nlm_lck_denied with the
 * conflicting lock's details copied into @conflock, nlm_drop_reply if
 * the test was deferred to the filesystem, or nlm_lck_denied_nolocks
 * on error. A block (with a spare file_lock in b_fl to receive a
 * deferred conflict) is created only so a deferred answer can be
 * matched to a retransmitted request.
 */
__be32
nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
		struct nlm_lock *lock, struct nlm_lock *conflock,
		struct nlm_cookie *cookie)
{
	struct nlm_block *block = NULL;
	int error;
	__be32 ret;

	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
				file->f_file->f_path.dentry->d_inode->i_ino,
				lock->fl.fl_type,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* Get existing block (in case client is busy-waiting) */
	block = nlmsvc_lookup_block(file, lock);

	if (block == NULL) {
		struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
		struct nlm_host *host;

		/* Out of memory: report "no conflict" (best effort) */
		if (conf == NULL)
			return nlm_granted;
		/* Create host handle for callback */
		host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
		if (host == NULL) {
			kfree(conf);
			return nlm_lck_denied_nolocks;
		}
		block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
		if (block == NULL) {
			kfree(conf);
			return nlm_granted;
		}
		/* conf is now owned by the block (freed in nlmsvc_free_block) */
		block->b_fl = conf;
	}
	if (block->b_flags & B_QUEUED) {
		/* Previously deferred test: report the stored outcome */
		dprintk("lockd: nlmsvc_testlock deferred block %p flags %d fl %p\n",
			block, block->b_flags, block->b_fl);
		if (block->b_flags & B_TIMED_OUT) {
			nlmsvc_unlink_block(block);
			ret = nlm_lck_denied;
			goto out;
		}
		if (block->b_flags & B_GOT_CALLBACK) {
			nlmsvc_unlink_block(block);
			if (block->b_fl != NULL
					&& block->b_fl->fl_type != F_UNLCK) {
				lock->fl = *block->b_fl;
				goto conf_lock;
			} else {
				ret = nlm_granted;
				goto out;
			}
		}
		/* Still pending: drop this (retransmitted) request */
		ret = nlm_drop_reply;
		goto out;
	}

	error = vfs_test_lock(file->f_file, &lock->fl);
	if (error == -EINPROGRESS) {
		ret = nlmsvc_defer_lock_rqst(rqstp, block);
		goto out;
	}
	if (error) {
		ret = nlm_lck_denied_nolocks;
		goto out;
	}
	/* vfs_test_lock sets fl_type to F_UNLCK when there is no conflict */
	if (lock->fl.fl_type == F_UNLCK) {
		ret = nlm_granted;
		goto out;
	}

conf_lock:
	dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
		lock->fl.fl_type, (long long)lock->fl.fl_start,
		(long long)lock->fl.fl_end);
	conflock->caller = "somehost";	/* FIXME */
	conflock->len = strlen(conflock->caller);
	conflock->oh.len = 0;		/* don't return OH info */
	conflock->svid = lock->fl.fl_pid;
	conflock->fl.fl_type = lock->fl.fl_type;
	conflock->fl.fl_start = lock->fl.fl_start;
	conflock->fl.fl_end = lock->fl.fl_end;
	ret = nlm_lck_denied;
out:
	if (block)
		nlmsvc_release_block(block);
	return ret;
}
/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 *
 * Returns nlm_granted on success, nlm_lck_denied_nolocks if the VFS
 * unlock failed.
 */
__be32
nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
{
	int error;

	dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
				file->f_file->f_path.dentry->d_inode->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* First, cancel any lock that might be there */
	nlmsvc_cancel_blocked(file, lock);

	lock->fl.fl_type = F_UNLCK;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);

	return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
}
/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 *
 * Returns nlm_granted if no matching block existed or it was unlinked
 * cleanly, nlm_lck_denied if posix_unblock_lock reported an error.
 */
__be32
nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	int status = 0;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
				file->f_file->f_path.dentry->d_inode->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		/* Tell the filesystem to abandon any in-flight operation */
		vfs_cancel_lock(block->b_file->f_file,
				&block->b_call->a_args.lock.fl);
		status = nlmsvc_unlink_block(block);
		/* Drop the reference taken by nlmsvc_lookup_block */
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}
  539. /*
  540. * This is a callback from the filesystem for VFS file lock requests.
  541. * It will be used if fl_grant is defined and the filesystem can not
  542. * respond to the request immediately.
  543. * For GETLK request it will copy the reply to the nlm_block.
  544. * For SETLK or SETLKW request it will get the local posix lock.
  545. * In all cases it will move the block to the head of nlm_blocked q where
  546. * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
  547. * deferred rpc for GETLK and SETLK.
  548. */
  549. static void
  550. nlmsvc_update_deferred_block(struct nlm_block *block, struct file_lock *conf,
  551. int result)
  552. {
  553. block->b_flags |= B_GOT_CALLBACK;
  554. if (result == 0)
  555. block->b_granted = 1;
  556. else
  557. block->b_flags |= B_TIMED_OUT;
  558. if (conf) {
  559. if (block->b_fl)
  560. __locks_copy_lock(block->b_fl, conf);
  561. }
  562. }
/*
 * fl_grant callback: the filesystem has completed a deferred lock or
 * test request. Runs under the BKL while scanning nlm_blocked.
 *
 * On a match, records the result (for B_QUEUED blocks) or marks the
 * block granted, then requeues it at the head of nlm_blocked and
 * wakes lockd to process it. Returns 0 on success, -ENOLCK if the
 * block had already timed out, -ENOENT if no block matches @fl.
 */
static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf,
							int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	lock_kernel();
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
							block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, conf, result);
			} else if (result == 0)
				block->b_granted = 1;

			/* Requeue immediately so lockd picks it up */
			nlmsvc_insert_block(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	unlock_kernel();
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}
/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block *block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			/* Requeue for immediate retry and wake lockd */
			nlmsvc_insert_block(block, 0);
			svc_wake_up(block->b_daemon);
			return;
		}
	}

	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}
  613. static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
  614. {
  615. return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
  616. }
/*
 * Lock manager callbacks installed on every blocking lock lockd sets
 * up (via fl_lmops in nlmsvc_create_block()).
 */
struct lock_manager_operations nlmsvc_lock_operations = {
	.fl_compare_owner = nlmsvc_same_owner,
	.fl_notify = nlmsvc_notify_blocked,
	.fl_grant = nlmsvc_grant_deferred,
};
/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file		*file = block->b_file;
	struct nlm_lock		*lock = &block->b_call->a_args.lock;
	int			error;

	dprintk("lockd: grant blocked lock %p\n", block);

	/* Hold a reference across the unlink/requeue dance; dropped on
	 * every early return, or by nlmsvc_grant_release() after the
	 * async RPC completes */
	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	lock->fl.fl_flags |= FL_SLEEP;
	error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;

	switch (error) {
	case 0:
		break;
	case -EAGAIN:
	case -EINPROGRESS:
		/* Still blocked: wait for another fl_notify/fl_grant */
		dprintk("lockd: lock still blocked error %d\n", error);
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __FUNCTION__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* keep block on the list, but don't reattempt until the RPC
	 * completes or the submission fails
	 */
	nlmsvc_insert_block(block, NLM_NEVER);

	/* Call the client -- use a soft RPC task since nlmsvc_retry_blocked
	 * will queue up a new one if this one times out
	 */
	error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG,
				&nlmsvc_grant_ops);

	/* RPC submission failed, wait a bit and retry */
	if (error < 0)
		nlmsvc_insert_block(block, 10 * HZ);
}
/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst		*call = data;
	struct nlm_block	*block = call->a_block;
	unsigned long		timeout;

	dprintk("lockd: GRANT_MSG RPC callback\n");

	/* if the block is not on a list at this point then it has
	 * been invalidated. Don't try to requeue it.
	 *
	 * FIXME: it's possible that the block is removed from the list
	 * after this check but before the nlmsvc_insert_block. In that
	 * case it will be added back. Perhaps we need better locking
	 * for nlm_blocked?
	 */
	if (list_empty(&block->b_list))
		return;

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block(block, timeout);
	svc_wake_up(block->b_daemon);
}
  724. static void nlmsvc_grant_release(void *data)
  725. {
  726. struct nlm_rqst *call = data;
  727. nlmsvc_release_block(call->a_block);
  728. }
/* Completion/release handlers for the async NLMPROC_GRANTED_MSG call */
static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};
  733. /*
  734. * We received a GRANT_RES callback. Try to find the corresponding
  735. * block.
  736. */
  737. void
  738. nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
  739. {
  740. struct nlm_block *block;
  741. dprintk("grant_reply: looking for cookie %x, s=%d \n",
  742. *(unsigned int *)(cookie->data), status);
  743. if (!(block = nlmsvc_find_block(cookie)))
  744. return;
  745. if (block) {
  746. if (status == nlm_lck_denied_grace_period) {
  747. /* Try again in a couple of seconds */
  748. nlmsvc_insert_block(block, 10 * HZ);
  749. } else {
  750. /* Lock is now held by client, or has been rejected.
  751. * In both cases, the block should be removed. */
  752. nlmsvc_unlink_block(block);
  753. }
  754. }
  755. nlmsvc_release_block(block);
  756. }
/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 *
 * A block that never received its fl_grant callback is marked
 * B_TIMED_OUT here; the revisit makes the svc cache layer re-run the
 * original deferred request so the stored outcome can be reported.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	/* Keep the block around long enough for the revisit to find it */
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n",	block, block->b_flags);
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}
/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 *
 * Returns the time (in jiffies) until the next block is due, or
 * MAX_SCHEDULE_TIMEOUT if nothing is pending; lockd sleeps that long.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
	unsigned long	timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	/* nlm_blocked is ordered by b_when, so stop at the first entry
	 * that is not yet due (or never retried) */
	while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
	        if (time_after(block->b_when,jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
	}

	return timeout;
}