/*
 * linux/fs/lockd/clntlock.c
 *
 * Lock handling for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/nfs_fs.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/smp_lock.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT

/*
 * Local function prototypes
 */
static int			reclaimer(void *ptr);

/*
 * The following functions handle blocking and granting from the
 * client perspective.
 */

/*
 * This is the representation of a blocked client lock.
 */
struct nlm_wait {
	struct list_head	b_list;		/* linked list */
	wait_queue_head_t	b_wait;		/* where to wait on */
	struct nlm_host *	b_host;
	struct file_lock *	b_lock;		/* local file lock */
	unsigned short		b_reclaim;	/* got to reclaim lock */
	u32			b_status;	/* grant callback status */
};
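
/*
 * Global list of blocked client locks, searched by nlmclnt_grant()
 * when the server's GRANTED callback arrives.
 */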
static LIST_HEAD(nlm_blocked);

/*
 * Queue up a lock for blocking so that the GRANTED request can see it
 */
int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_wait *block;

	BUG_ON(req->a_block != NULL);
	block = kmalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return -ENOMEM;
	block->b_host = host;
	block->b_lock = fl;
	init_waitqueue_head(&block->b_wait);
	block->b_status = NLM_LCK_BLOCKED;

	list_add(&block->b_list, &nlm_blocked);
	req->a_block = block;

	return 0;
}
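
/*
 * Tear down the block set up by nlmclnt_prepare_block(): unlink it from
 * nlm_blocked and free it. A NULL req->a_block is tolerated, so this is
 * safe to call even when no block was ever queued.
 */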
void nlmclnt_finish_block(struct nlm_rqst *req)
{
	struct nlm_wait *block = req->a_block;

	if (block == NULL)
		return;
	req->a_block = NULL;
	list_del(&block->b_list);
	kfree(block);
}

/*
 * Block on a lock
 */
long nlmclnt_block(struct nlm_rqst *req, long timeout)
{
	struct nlm_wait *block = req->a_block;
	long ret;

	/* A broken server might ask us to block even if we didn't
	 * request it. Just say no!
	 */
	if (!req->a_args.block)
		return -EAGAIN;

	/* Go to sleep waiting for GRANT callback. Some servers seem
	 * to lose callbacks, however, so we're going to poll from
	 * time to time just to make sure.
	 *
	 * For now, the retry frequency is pretty high; normally
	 * a 1 minute timeout would do. See the comment before
	 * nlmclnt_lock for an explanation.
	 */
	ret = wait_event_interruptible_timeout(block->b_wait,
			block->b_status != NLM_LCK_BLOCKED,
			timeout);

	if (block->b_status != NLM_LCK_BLOCKED) {
		req->a_res.status = block->b_status;
		block->b_status = NLM_LCK_BLOCKED;
	}
	return ret;
}
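
/*
 * A rough sketch of how the helpers above are meant to be driven: queue
 * the block, send the blocking LOCK request, and as long as the server
 * answers NLM_LCK_BLOCKED, sleep in nlmclnt_block() until the GRANTED
 * callback arrives (or the poll timeout expires) and then resend. The
 * real loop lives in nlmclnt_lock() in clntproc.c; the lines below are
 * illustrative only, and the 30 * HZ poll interval is just an example:
 *
 *	nlmclnt_prepare_block(req, host, fl);
 *	do {
 *		status = nlmclnt_call(req, NLMPROC_LOCK);
 *		if (status < 0 || req->a_res.status != NLM_LCK_BLOCKED)
 *			break;
 *		nlmclnt_block(req, 30 * HZ);
 *	} while (req->a_res.status == NLM_LCK_BLOCKED);
 *	nlmclnt_finish_block(req);
 */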

/*
 * The server lockd has called us back to tell us the lock was granted
 */
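/* Returns nlm_granted if a waiter was found and woken, nlm_lck_denied otherwise. */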
u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock)
{
	const struct file_lock *fl = &lock->fl;
	const struct nfs_fh *fh = &lock->fh;
	struct nlm_wait	*block;
	u32 res = nlm_lck_denied;

	/*
	 * Look up blocked request based on arguments.
	 * Warning: must not use cookie to match it!
	 */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		struct file_lock *fl_blocked = block->b_lock;

		if (!nlm_compare_locks(fl_blocked, fl))
			continue;
		if (!nlm_cmp_addr(&block->b_host->h_addr, addr))
			continue;
		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_dentry->d_inode), fh) != 0)
			continue;
		/* Alright, we found a lock. Set the return status
		 * and wake up the caller
		 */
		block->b_status = NLM_LCK_GRANTED;
		wake_up(&block->b_wait);
		res = nlm_granted;
	}
	return res;
}

/*
 * The following procedures deal with the recovery of locks after a
 * server crash.
 */

/*
 * Mark the locks for reclaiming.
 * FIXME: In 2.5 we don't want to iterate through any global file_lock_list.
 *        Maintain NLM lock reclaiming lists in the nlm_host instead.
 */
static
void nlmclnt_mark_reclaim(struct nlm_host *host)
{
	struct file_lock *fl;
	struct inode *inode;
	struct list_head *tmp;

	list_for_each(tmp, &file_lock_list) {
		fl = list_entry(tmp, struct file_lock, fl_link);

		inode = fl->fl_file->f_dentry->d_inode;
		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
			continue;
		if (fl->fl_u.nfs_fl.owner == NULL)
			continue;
		if (fl->fl_u.nfs_fl.owner->host != host)
			continue;
		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED))
			continue;
		fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM;
	}
}

/*
 * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number,
 * that we mark locks for reclaiming, and that we bump the pseudo NSM state.
 */
static inline
void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate)
{
	host->h_monitored = 0;
	host->h_nsmstate = newstate;
	host->h_state++;
	host->h_nextrebind = 0;
	nlm_rebind_host(host);
	nlmclnt_mark_reclaim(host);
	dprintk("NLM: reclaiming locks for host %s\n", host->h_name);
}

/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 */
void
nlmclnt_recovery(struct nlm_host *host, u32 newstate)
{
	if (host->h_reclaiming++) {
		if (host->h_nsmstate == newstate)
			return;
		nlmclnt_prepare_reclaim(host, newstate);
	} else {
		nlmclnt_prepare_reclaim(host, newstate);
		nlm_get_host(host);
		__module_get(THIS_MODULE);
		if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
			module_put(THIS_MODULE);
	}
}
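
/*
 * Reclaimer kernel thread: walk the global file_lock_list and re-acquire
 * every lock that nlmclnt_mark_reclaim() flagged for this host. The scan
 * restarts from the top after each reclaim, since nlmclnt_reclaim() may
 * sleep and the list can change underneath us. Once done, wake up all
 * processes blocked on a lock against this host, handing them
 * NLM_LCK_DENIED_GRACE_PERIOD so their requests are retried.
 */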
static int
reclaimer(void *ptr)
{
	struct nlm_host  *host = (struct nlm_host *) ptr;
	struct nlm_wait  *block;
	struct list_head *tmp;
	struct file_lock *fl;
	struct inode *inode;

	daemonize("%s-reclaim", host->h_name);
	allow_signal(SIGKILL);

	/* This one ensures that our parent doesn't terminate while the
	 * reclaim is in progress */
	lock_kernel();
	lockd_up();

	/* First, reclaim all locks that have been marked. */
restart:
	list_for_each(tmp, &file_lock_list) {
		fl = list_entry(tmp, struct file_lock, fl_link);

		inode = fl->fl_file->f_dentry->d_inode;
		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
			continue;
		if (fl->fl_u.nfs_fl.owner == NULL)
			continue;
		if (fl->fl_u.nfs_fl.owner->host != host)
			continue;
		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM))
			continue;

		fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM;
		nlmclnt_reclaim(host, fl);
		if (signalled())
			break;
		goto restart;
	}

	host->h_reclaiming = 0;

	/* Now, wake up all processes that sleep on a blocked lock */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (block->b_host == host) {
			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
			wake_up(&block->b_wait);
		}
	}

	/* Release host handle after use */
	nlm_release_host(host);
	lockd_down();
	unlock_kernel();
	module_put_and_exit(0);
}