svclock.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849
  1. /*
  2. * linux/fs/lockd/svclock.c
  3. *
  4. * Handling of server-side locks, mostly of the blocked variety.
  5. * This is the ugliest part of lockd because we tread on very thin ice.
  6. * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
  7. * IMNSHO introducing the grant callback into the NLM protocol was one
  8. * of the worst ideas Sun ever had. Except maybe for the idea of doing
  9. * NFS file locking at all.
  10. *
  11. * I'm trying hard to avoid race conditions by protecting most accesses
  12. * to a file's list of blocked locks through a semaphore. The global
  13. * list of blocked locks is not protected in this fashion however.
  14. * Therefore, some functions (such as the RPC callback for the async grant
  15. * call) move blocked locks towards the head of the list *while some other
  16. * process might be traversing it*. This should not be a problem in
  17. * practice, because this will only cause functions traversing the list
  18. * to visit some blocks twice.
  19. *
  20. * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
  21. */
  22. #include <linux/types.h>
  23. #include <linux/errno.h>
  24. #include <linux/kernel.h>
  25. #include <linux/sched.h>
  26. #include <linux/smp_lock.h>
  27. #include <linux/sunrpc/clnt.h>
  28. #include <linux/sunrpc/svc.h>
  29. #include <linux/lockd/nlm.h>
  30. #include <linux/lockd/lockd.h>
  31. #define NLMDBG_FACILITY NLMDBG_SVCLOCK
  32. #ifdef CONFIG_LOCKD_V4
  33. #define nlm_deadlock nlm4_deadlock
  34. #else
  35. #define nlm_deadlock nlm_lck_denied
  36. #endif
  37. static void nlmsvc_release_block(struct nlm_block *block);
  38. static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
  39. static void nlmsvc_remove_block(struct nlm_block *block);
  40. static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
  41. static void nlmsvc_freegrantargs(struct nlm_rqst *call);
  42. static const struct rpc_call_ops nlmsvc_grant_ops;
  43. /*
  44. * The list of blocked locks to retry
  45. */
  46. static LIST_HEAD(nlm_blocked);
/*
 * Insert a blocked lock into the global list, sorted by expiry time.
 */
static void
nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	struct nlm_block *b;
	struct list_head *pos;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	/* A block not yet on nlm_blocked gets a reference held on behalf
	 * of the list; one already queued is simply repositioned. */
	if (list_empty(&block->b_list)) {
		kref_get(&block->b_count);
	} else {
		list_del_init(&block->b_list);
	}
	pos = &nlm_blocked;
	if (when != NLM_NEVER) {
		/* Convert the relative timeout to an absolute jiffies value,
		 * bumping it by one if it happens to collide with the
		 * NLM_NEVER sentinel. */
		if ((when += jiffies) == NLM_NEVER)
			when ++;
		/* Find the insertion point: entries are kept ordered by
		 * b_when, with NLM_NEVER entries sorting last. */
		list_for_each(pos, &nlm_blocked) {
			b = list_entry(pos, struct nlm_block, b_list);
			if (time_after(b->b_when,when) || b->b_when == NLM_NEVER)
				break;
		}
		/* On normal exit from the loop, pos == &nlm_blocked,
		 * so we will be adding to the end of the list - good
		 */
	}
	list_add_tail(&block->b_list, pos);
	block->b_when = when;
}
  77. /*
  78. * Remove a block from the global list
  79. */
  80. static inline void
  81. nlmsvc_remove_block(struct nlm_block *block)
  82. {
  83. if (!list_empty(&block->b_list)) {
  84. list_del_init(&block->b_list);
  85. nlmsvc_release_block(block);
  86. }
  87. }
/*
 * Find a block for a given lock.
 *
 * Walks the global nlm_blocked list looking for an entry on the same
 * file whose lock description matches. On success the caller receives
 * its own reference to the block (kref_get); returns NULL if no match.
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	struct file_lock *fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
			file, lock->fl.fl_pid,
			(long long)lock->fl.fl_start,
			(long long)lock->fl.fl_end, lock->fl.fl_type);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		fl = &block->b_call->a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->fl_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->fl_type,
				nlmdbg_cookie2a(&block->b_call->a_args.cookie));
		/* Match requires both the same file and an equal lock */
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			kref_get(&block->b_count);
			return block;
		}
	}
	return NULL;
}
  114. static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
  115. {
  116. if(a->len != b->len)
  117. return 0;
  118. if(memcmp(a->data,b->data,a->len))
  119. return 0;
  120. return 1;
  121. }
  122. /*
  123. * Find a block with a given NLM cookie.
  124. */
  125. static inline struct nlm_block *
  126. nlmsvc_find_block(struct nlm_cookie *cookie)
  127. {
  128. struct nlm_block *block;
  129. list_for_each_entry(block, &nlm_blocked, b_list) {
  130. if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
  131. goto found;
  132. }
  133. return NULL;
  134. found:
  135. dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
  136. kref_get(&block->b_count);
  137. return block;
  138. }
/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards comittees, they support our
 * logging industries.
 *
 * 10 years later: I hope we can safely ignore these old and broken
 * clients by now. Let's fix this so we can uniquely identify an incoming
 * GRANTED_RES message by cookie, without having to rely on the client's IP
 * address. --okir
 *
 * Returns a block with one reference held (from kref_init), already on
 * the file's f_blocks list, or NULL on failure.
 */
static inline struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
				struct nlm_lock *lock, struct nlm_cookie *cookie)
{
	struct nlm_block *block;
	struct nlm_host *host;
	struct nlm_rqst *call = NULL;

	/* Create host handle for callback */
	host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
	if (host == NULL)
		return NULL;

	/* NOTE(review): if nlm_alloc_call() fails here, the host handle
	 * obtained above does not appear to be released on this path --
	 * confirm against nlm_alloc_call()'s ownership contract. */
	call = nlm_alloc_call(host);
	if (call == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		goto failed;
	kref_init(&block->b_count);
	INIT_LIST_HEAD(&block->b_list);
	INIT_LIST_HEAD(&block->b_flist);

	/* Copy the lock description into the GRANTED callback arguments;
	 * may allocate a private owner handle (freed by freegrantargs) */
	if (!nlmsvc_setgrantargs(call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
	nlmclnt_next_cookie(&call->a_args.cookie);

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_host = host;
	block->b_file = file;
	block->b_fl = NULL;
	file->f_count++;

	/* Add to file's list of blocks */
	list_add(&block->b_flist, &file->f_blocks);

	/* Set up RPC arguments for callback */
	block->b_call = call;
	call->a_flags = RPC_TASK_ASYNC;
	call->a_block = block;

	return block;

failed_free:
	kfree(block);
failed:
	nlm_release_call(call);
	return NULL;
}
/*
 * Delete a block. If the lock was cancelled or the grant callback
 * failed, unlock is set to 1.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 *
 * Returns the status from posix_unblock_lock().
 */
static int nlmsvc_unlink_block(struct nlm_block *block)
{
	int status;

	dprintk("lockd: unlinking block %p...\n", block);

	/* Remove block from list; first wake any sleeper still blocked
	 * on the posix lock in the VFS */
	status = posix_unblock_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl);
	nlmsvc_remove_block(block);
	return status;
}
/*
 * kref release function: tear down a block once its last reference is
 * dropped. Detaches the block from the file's list (under f_mutex) and
 * releases the call, file, and conflicting-lock storage it owns.
 */
static void nlmsvc_free_block(struct kref *kref)
{
	struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
	struct nlm_file *file = block->b_file;

	dprintk("lockd: freeing block %p...\n", block);

	/* Remove block from file's list of blocks */
	mutex_lock(&file->f_mutex);
	list_del_init(&block->b_flist);
	mutex_unlock(&file->f_mutex);

	nlmsvc_freegrantargs(block->b_call);
	nlm_release_call(block->b_call);
	nlm_release_file(block->b_file);
	kfree(block->b_fl);
	kfree(block);
}
  231. static void nlmsvc_release_block(struct nlm_block *block)
  232. {
  233. if (block != NULL)
  234. kref_put(&block->b_count, nlmsvc_free_block);
  235. }
/*
 * Loop over all blocks and delete blocks held by
 * a matching host.
 */
void nlmsvc_traverse_blocks(struct nlm_host *host,
			struct nlm_file *file,
			nlm_host_match_fn_t match)
{
	struct nlm_block *block, *next;

restart:
	mutex_lock(&file->f_mutex);
	list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
		if (!match(block->b_host, host))
			continue;
		/* Do not destroy blocks that are not on
		 * the global retry list - why? */
		if (list_empty(&block->b_list))
			continue;
		/* Pin the block, then drop f_mutex before unlinking:
		 * nlmsvc_unlink_block() may release references and the
		 * list can change under us, so restart the scan. */
		kref_get(&block->b_count);
		mutex_unlock(&file->f_mutex);
		nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
		goto restart;
	}
	mutex_unlock(&file->f_mutex);
}
/*
 * Initialize arguments for GRANTED call. The nlm_rqst structure
 * has been cleared already.
 *
 * Returns 1 on success, 0 if the owner-handle allocation failed.
 */
static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
	call->a_args.lock.caller = utsname()->nodename;
	call->a_args.lock.oh.len = lock->oh.len;

	/* set default data area */
	call->a_args.lock.oh.data = call->a_owner;
	call->a_args.lock.svid = lock->fl.fl_pid;

	/* Owner handles larger than the inline a_owner buffer get a
	 * separate allocation; nlmsvc_freegrantargs() frees it. */
	if (lock->oh.len > NLMCLNT_OHSIZE) {
		void *data = kmalloc(lock->oh.len, GFP_KERNEL);
		if (!data)
			return 0;
		call->a_args.lock.oh.data = (u8 *) data;
	}

	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
	return 1;
}
  284. static void nlmsvc_freegrantargs(struct nlm_rqst *call)
  285. {
  286. if (call->a_args.lock.oh.data != call->a_owner)
  287. kfree(call->a_args.lock.oh.data);
  288. }
/*
 * Deferred lock request handling for non-blocking lock
 *
 * Marks the block queued and schedules it for retry. If the transport
 * supports request deferral, the deferred request is stashed so the
 * reply can be dropped now and revisited later.
 * Returns nlm_drop_reply on successful deferral, otherwise
 * nlm_lck_denied_nolocks.
 */
static u32
nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
{
	u32 status = nlm_lck_denied_nolocks;

	block->b_flags |= B_QUEUED;
	nlmsvc_insert_block(block, NLM_TIMEOUT);

	block->b_cache_req = &rqstp->rq_chandle;
	if (rqstp->rq_chandle.defer) {
		block->b_deferred_req =
			rqstp->rq_chandle.defer(block->b_cache_req);
		if (block->b_deferred_req != NULL)
			status = nlm_drop_reply;
	}
	dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
		block, block->b_flags, status);

	return status;
}
/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 *
 * Returns nlm_granted, nlm_lck_denied, nlm_lck_blocked (wait case),
 * nlm_deadlock, or nlm_lck_denied_nolocks on resource failure.
 */
__be32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
			struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
{
	struct nlm_block *block = NULL;
	int error;
	__be32 ret;

	dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
				file->f_file->f_path.dentry->d_inode->i_ino,
				lock->fl.fl_type, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);

	/* Lock file against concurrent access */
	mutex_lock(&file->f_mutex);
	/* Get existing block (in case client is busy-waiting)
	 * or create new block
	 */
	block = nlmsvc_lookup_block(file, lock);
	if (block == NULL) {
		block = nlmsvc_create_block(rqstp, file, lock, cookie);
		ret = nlm_lck_denied_nolocks;
		if (block == NULL)
			goto out;
		/* From here on, use the block's own lock description,
		 * which carries FL_SLEEP and the lockd lock operations */
		lock = &block->b_call->a_args.lock;
	} else
		lock->fl.fl_flags &= ~FL_SLEEP;

	error = posix_lock_file(file->f_file, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;

	dprintk("lockd: posix_lock_file returned %d\n", error);
	switch(error) {
		case 0:
			ret = nlm_granted;
			goto out;
		case -EAGAIN:
			/* conflict exists; fall through to block or deny */
			break;
		case -EDEADLK:
			ret = nlm_deadlock;
			goto out;
		default:			/* includes ENOLCK */
			ret = nlm_lck_denied_nolocks;
			goto out;
	}

	ret = nlm_lck_denied;
	if (!wait)
		goto out;

	ret = nlm_lck_blocked;

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);
out:
	mutex_unlock(&file->f_mutex);
	/* Drop the reference obtained from lookup/create */
	nlmsvc_release_block(block);
	dprintk("lockd: nlmsvc_lock returned %u\n", ret);
	return ret;
}
  369. /*
  370. * Test for presence of a conflicting lock.
  371. */
  372. __be32
  373. nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
  374. struct nlm_lock *lock, struct nlm_lock *conflock,
  375. struct nlm_cookie *cookie)
  376. {
  377. struct nlm_block *block = NULL;
  378. int error;
  379. __be32 ret;
  380. dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
  381. file->f_file->f_path.dentry->d_inode->i_sb->s_id,
  382. file->f_file->f_path.dentry->d_inode->i_ino,
  383. lock->fl.fl_type,
  384. (long long)lock->fl.fl_start,
  385. (long long)lock->fl.fl_end);
  386. /* Get existing block (in case client is busy-waiting) */
  387. block = nlmsvc_lookup_block(file, lock);
  388. if (block == NULL) {
  389. struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
  390. if (conf == NULL)
  391. return nlm_granted;
  392. block = nlmsvc_create_block(rqstp, file, lock, cookie);
  393. if (block == NULL) {
  394. kfree(conf);
  395. return nlm_granted;
  396. }
  397. block->b_fl = conf;
  398. }
  399. if (block->b_flags & B_QUEUED) {
  400. dprintk("lockd: nlmsvc_testlock deferred block %p flags %d fl %p\n",
  401. block, block->b_flags, block->b_fl);
  402. if (block->b_flags & B_TIMED_OUT) {
  403. nlmsvc_unlink_block(block);
  404. return nlm_lck_denied;
  405. }
  406. if (block->b_flags & B_GOT_CALLBACK) {
  407. if (block->b_fl != NULL
  408. && block->b_fl->fl_type != F_UNLCK) {
  409. lock->fl = *block->b_fl;
  410. goto conf_lock;
  411. }
  412. else {
  413. nlmsvc_unlink_block(block);
  414. return nlm_granted;
  415. }
  416. }
  417. return nlm_drop_reply;
  418. }
  419. error = vfs_test_lock(file->f_file, &lock->fl);
  420. if (error == -EINPROGRESS)
  421. return nlmsvc_defer_lock_rqst(rqstp, block);
  422. if (error) {
  423. ret = nlm_lck_denied_nolocks;
  424. goto out;
  425. }
  426. if (lock->fl.fl_type == F_UNLCK) {
  427. ret = nlm_granted;
  428. goto out;
  429. }
  430. conf_lock:
  431. dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
  432. lock->fl.fl_type, (long long)lock->fl.fl_start,
  433. (long long)lock->fl.fl_end);
  434. conflock->caller = "somehost"; /* FIXME */
  435. conflock->len = strlen(conflock->caller);
  436. conflock->oh.len = 0; /* don't return OH info */
  437. conflock->svid = lock->fl.fl_pid;
  438. conflock->fl.fl_type = lock->fl.fl_type;
  439. conflock->fl.fl_start = lock->fl.fl_start;
  440. conflock->fl.fl_end = lock->fl.fl_end;
  441. ret = nlm_lck_denied;
  442. out:
  443. if (block)
  444. nlmsvc_release_block(block);
  445. return ret;
  446. }
  447. /*
  448. * Remove a lock.
  449. * This implies a CANCEL call: We send a GRANT_MSG, the client replies
  450. * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
  451. * afterwards. In this case the block will still be there, and hence
  452. * must be removed.
  453. */
  454. __be32
  455. nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
  456. {
  457. int error;
  458. dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n",
  459. file->f_file->f_path.dentry->d_inode->i_sb->s_id,
  460. file->f_file->f_path.dentry->d_inode->i_ino,
  461. lock->fl.fl_pid,
  462. (long long)lock->fl.fl_start,
  463. (long long)lock->fl.fl_end);
  464. /* First, cancel any lock that might be there */
  465. nlmsvc_cancel_blocked(file, lock);
  466. lock->fl.fl_type = F_UNLCK;
  467. error = posix_lock_file(file->f_file, &lock->fl, NULL);
  468. return (error < 0)? nlm_lck_denied_nolocks : nlm_granted;
  469. }
/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 *
 * Returns nlm_granted, or nlm_lck_denied when nlmsvc_unlink_block()
 * reports a non-zero status.
 */
__be32
nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block *block;
	int status = 0;

	dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
				file->f_file->f_path.dentry->d_inode->i_sb->s_id,
				file->f_file->f_path.dentry->d_inode->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* Lookup takes its own reference; released below */
	mutex_lock(&file->f_mutex);
	block = nlmsvc_lookup_block(file, lock);
	mutex_unlock(&file->f_mutex);
	if (block != NULL) {
		status = nlmsvc_unlink_block(block);
		nlmsvc_release_block(block);
	}
	return status ? nlm_lck_denied : nlm_granted;
}
  497. /*
  498. * This is a callback from the filesystem for VFS file lock requests.
  499. * It will be used if fl_grant is defined and the filesystem can not
  500. * respond to the request immediately.
  501. * For GETLK request it will copy the reply to the nlm_block.
  502. * For SETLK or SETLKW request it will get the local posix lock.
  503. * In all cases it will move the block to the head of nlm_blocked q where
  504. * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the
  505. * deferred rpc for GETLK and SETLK.
  506. */
  507. static void
  508. nlmsvc_update_deferred_block(struct nlm_block *block, struct file_lock *conf,
  509. int result)
  510. {
  511. block->b_flags |= B_GOT_CALLBACK;
  512. if (result == 0)
  513. block->b_granted = 1;
  514. else
  515. block->b_flags |= B_TIMED_OUT;
  516. if (conf) {
  517. if (block->b_fl)
  518. locks_copy_lock(block->b_fl, conf);
  519. }
  520. }
/*
 * fl_grant callback: a deferred VFS lock request has completed.
 * Finds the matching block on nlm_blocked, records the outcome, and
 * requeues the block for immediate attention by lockd.
 * Returns 0 on success, -ENOLCK if the block already timed out,
 * -ENOENT if no matching block exists.
 */
static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf,
		int result)
{
	struct nlm_block *block;
	int rc = -ENOENT;

	/* BKL serializes access to the nlm_blocked list here */
	lock_kernel();
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
			dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n",
							block, block->b_flags);
			if (block->b_flags & B_QUEUED) {
				if (block->b_flags & B_TIMED_OUT) {
					rc = -ENOLCK;
					break;
				}
				nlmsvc_update_deferred_block(block, conf, result);
			} else if (result == 0)
				block->b_granted = 1;

			/* Requeue at the head and wake the lockd thread */
			nlmsvc_insert_block(block, 0);
			svc_wake_up(block->b_daemon);
			rc = 0;
			break;
		}
	}
	unlock_kernel();
	if (rc == -ENOENT)
		printk(KERN_WARNING "lockd: grant for unknown block\n");
	return rc;
}
  550. /*
  551. * Unblock a blocked lock request. This is a callback invoked from the
  552. * VFS layer when a lock on which we blocked is removed.
  553. *
  554. * This function doesn't grant the blocked lock instantly, but rather moves
  555. * the block to the head of nlm_blocked where it can be picked up by lockd.
  556. */
  557. static void
  558. nlmsvc_notify_blocked(struct file_lock *fl)
  559. {
  560. struct nlm_block *block;
  561. dprintk("lockd: VFS unblock notification for block %p\n", fl);
  562. list_for_each_entry(block, &nlm_blocked, b_list) {
  563. if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
  564. nlmsvc_insert_block(block, 0);
  565. svc_wake_up(block->b_daemon);
  566. return;
  567. }
  568. }
  569. printk(KERN_WARNING "lockd: notification for unknown block!\n");
  570. }
  571. static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
  572. {
  573. return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid;
  574. }
/*
 * Lock manager operations handed to the VFS for lockd-owned locks.
 */
struct lock_manager_operations nlmsvc_lock_operations = {
	.fl_compare_owner = nlmsvc_same_owner,	/* owner equality test */
	.fl_notify = nlmsvc_notify_blocked,	/* blocking lock released */
	.fl_grant = nlmsvc_grant_deferred,	/* deferred request completed */
};
/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file *file = block->b_file;
	struct nlm_lock *lock = &block->b_call->a_args.lock;
	int error;

	dprintk("lockd: grant blocked lock %p\n", block);

	/* Hold an extra reference for the grant attempt; the async RPC
	 * path drops it via nlmsvc_grant_release() */
	kref_get(&block->b_count);

	/* Unlink block request from list */
	nlmsvc_unlink_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_host);
		goto callback;
	}

	/* Try the lock operation again */
	lock->fl.fl_flags |= FL_SLEEP;
	error = posix_lock_file(file->f_file, &lock->fl, NULL);
	lock->fl.fl_flags &= ~FL_SLEEP;
	switch (error) {
	case 0:
		break;
	case -EAGAIN:
		/* Still conflicting: back on the list with no timeout */
		dprintk("lockd: lock still blocked\n");
		nlmsvc_insert_block(block, NLM_NEVER);
		nlmsvc_release_block(block);
		return;
	default:
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __FUNCTION__);
		nlmsvc_insert_block(block, 10 * HZ);
		nlmsvc_release_block(block);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;

	/* Schedule next grant callback in 30 seconds */
	nlmsvc_insert_block(block, 30 * HZ);

	/* Call the client */
	nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops);
}
  636. /*
  637. * This is the callback from the RPC layer when the NLM_GRANTED_MSG
  638. * RPC call has succeeded or timed out.
  639. * Like all RPC callbacks, it is invoked by the rpciod process, so it
  640. * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
  641. * chain once more in order to have it removed by lockd itself (which can
  642. * then sleep on the file semaphore without disrupting e.g. the nfs client).
  643. */
  644. static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
  645. {
  646. struct nlm_rqst *call = data;
  647. struct nlm_block *block = call->a_block;
  648. unsigned long timeout;
  649. dprintk("lockd: GRANT_MSG RPC callback\n");
  650. /* Technically, we should down the file semaphore here. Since we
  651. * move the block towards the head of the queue only, no harm
  652. * can be done, though. */
  653. if (task->tk_status < 0) {
  654. /* RPC error: Re-insert for retransmission */
  655. timeout = 10 * HZ;
  656. } else {
  657. /* Call was successful, now wait for client callback */
  658. timeout = 60 * HZ;
  659. }
  660. nlmsvc_insert_block(block, timeout);
  661. svc_wake_up(block->b_daemon);
  662. }
  663. static void nlmsvc_grant_release(void *data)
  664. {
  665. struct nlm_rqst *call = data;
  666. nlmsvc_release_block(call->a_block);
  667. }
/*
 * RPC callbacks for the async GRANTED_MSG call issued by
 * nlmsvc_grant_blocked().
 */
static const struct rpc_call_ops nlmsvc_grant_ops = {
	.rpc_call_done = nlmsvc_grant_callback,
	.rpc_release = nlmsvc_grant_release,
};
  672. /*
  673. * We received a GRANT_RES callback. Try to find the corresponding
  674. * block.
  675. */
  676. void
  677. nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
  678. {
  679. struct nlm_block *block;
  680. dprintk("grant_reply: looking for cookie %x, s=%d \n",
  681. *(unsigned int *)(cookie->data), status);
  682. if (!(block = nlmsvc_find_block(cookie)))
  683. return;
  684. if (block) {
  685. if (status == nlm_lck_denied_grace_period) {
  686. /* Try again in a couple of seconds */
  687. nlmsvc_insert_block(block, 10 * HZ);
  688. } else {
  689. /* Lock is now held by client, or has been rejected.
  690. * In both cases, the block should be removed. */
  691. nlmsvc_unlink_block(block);
  692. }
  693. }
  694. nlmsvc_release_block(block);
  695. }
/* Helper function to handle retry of a deferred block.
 * If it is a blocking lock, call grant_blocked.
 * For a non-blocking lock or test lock, revisit the request.
 */
static void
retry_deferred_block(struct nlm_block *block)
{
	/* No fl_grant callback arrived in time: mark the block timed out */
	if (!(block->b_flags & B_GOT_CALLBACK))
		block->b_flags |= B_TIMED_OUT;
	nlmsvc_insert_block(block, NLM_TIMEOUT);
	dprintk("revisit block %p flags %d\n",	block, block->b_flags);
	/* Replay the stashed deferred request, exactly once */
	if (block->b_deferred_req) {
		block->b_deferred_req->revisit(block->b_deferred_req, 0);
		block->b_deferred_req = NULL;
	}
}
/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 *
 * Returns the time in jiffies until the next scheduled retry, or
 * MAX_SCHEDULE_TIMEOUT when nothing is pending.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
	struct nlm_block *block;

	/* nlm_blocked is kept sorted by expiry (nlmsvc_insert_block),
	 * so we can stop at the first entry that has not yet expired */
	while (!list_empty(&nlm_blocked)) {
		block = list_entry(nlm_blocked.next, struct nlm_block, b_list);

		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when,jiffies)) {
			timeout = block->b_when - jiffies;
			break;
		}

		dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			block, block->b_when);
		if (block->b_flags & B_QUEUED) {
			dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n",
				block, block->b_granted, block->b_flags);
			retry_deferred_block(block);
		} else
			nlmsvc_grant_blocked(block);
	}

	return timeout;
}