  1. /*
  2. * linux/fs/nfs/delegation.c
  3. *
  4. * Copyright (C) 2004 Trond Myklebust
  5. *
  6. * NFS file delegation management
  7. *
  8. */
  9. #include <linux/config.h>
  10. #include <linux/completion.h>
  11. #include <linux/kthread.h>
  12. #include <linux/module.h>
  13. #include <linux/sched.h>
  14. #include <linux/spinlock.h>
  15. #include <linux/nfs4.h>
  16. #include <linux/nfs_fs.h>
  17. #include <linux/nfs_xdr.h>
  18. #include "nfs4_fs.h"
  19. #include "delegation.h"
  20. static struct nfs_delegation *nfs_alloc_delegation(void)
  21. {
  22. return (struct nfs_delegation *)kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
  23. }
  24. static void nfs_free_delegation(struct nfs_delegation *delegation)
  25. {
  26. if (delegation->cred)
  27. put_rpccred(delegation->cred);
  28. kfree(delegation);
  29. }
  30. static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
  31. {
  32. struct inode *inode = state->inode;
  33. struct file_lock *fl;
  34. int status;
  35. for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
  36. if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
  37. continue;
  38. if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
  39. continue;
  40. status = nfs4_lock_delegation_recall(state, fl);
  41. if (status >= 0)
  42. continue;
  43. switch (status) {
  44. default:
  45. printk(KERN_ERR "%s: unhandled error %d.\n",
  46. __FUNCTION__, status);
  47. case -NFS4ERR_EXPIRED:
  48. /* kill_proc(fl->fl_pid, SIGLOST, 1); */
  49. case -NFS4ERR_STALE_CLIENTID:
  50. nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs4_state);
  51. goto out_err;
  52. }
  53. }
  54. return 0;
  55. out_err:
  56. return status;
  57. }
/*
 * Walk every open file context on the inode and reclaim its delegated
 * open (and lock) state from the server.
 *
 * The open_files list is protected by inode->i_lock.  The lock is
 * dropped around the RPC calls, so the scan restarts from the list
 * head after each successful reclaim; a failed reclaim aborts the
 * walk.
 */
static void nfs_delegation_claim_opens(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		/* Skip state that is not (or no longer) delegated */
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		/* Pin the context so it survives dropping i_lock */
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx->dentry, state);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		/* The list may have changed while unlocked: rescan */
		goto again;
	}
	spin_unlock(&inode->i_lock);
}
  84. /*
  85. * Set up a delegation on an inode
  86. */
  87. void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
  88. {
  89. struct nfs_delegation *delegation = NFS_I(inode)->delegation;
  90. if (delegation == NULL)
  91. return;
  92. memcpy(delegation->stateid.data, res->delegation.data,
  93. sizeof(delegation->stateid.data));
  94. delegation->type = res->delegation_type;
  95. delegation->maxsize = res->maxsize;
  96. put_rpccred(cred);
  97. delegation->cred = get_rpccred(cred);
  98. delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
  99. NFS_I(inode)->delegation_state = delegation->type;
  100. smp_wmb();
  101. }
  102. /*
  103. * Set up a delegation on an inode
  104. */
  105. int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
  106. {
  107. struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
  108. struct nfs_inode *nfsi = NFS_I(inode);
  109. struct nfs_delegation *delegation;
  110. int status = 0;
  111. /* Ensure we first revalidate the attributes and page cache! */
  112. if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
  113. __nfs_revalidate_inode(NFS_SERVER(inode), inode);
  114. delegation = nfs_alloc_delegation();
  115. if (delegation == NULL)
  116. return -ENOMEM;
  117. memcpy(delegation->stateid.data, res->delegation.data,
  118. sizeof(delegation->stateid.data));
  119. delegation->type = res->delegation_type;
  120. delegation->maxsize = res->maxsize;
  121. delegation->change_attr = nfsi->change_attr;
  122. delegation->cred = get_rpccred(cred);
  123. delegation->inode = inode;
  124. spin_lock(&clp->cl_lock);
  125. if (nfsi->delegation == NULL) {
  126. list_add(&delegation->super_list, &clp->cl_delegations);
  127. nfsi->delegation = delegation;
  128. nfsi->delegation_state = delegation->type;
  129. delegation = NULL;
  130. } else {
  131. if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
  132. sizeof(delegation->stateid)) != 0 ||
  133. delegation->type != nfsi->delegation->type) {
  134. printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
  135. __FUNCTION__, NIPQUAD(clp->cl_addr));
  136. status = -EIO;
  137. }
  138. }
  139. spin_unlock(&clp->cl_lock);
  140. kfree(delegation);
  141. return status;
  142. }
  143. static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
  144. {
  145. int res = 0;
  146. __nfs_revalidate_inode(NFS_SERVER(inode), inode);
  147. res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
  148. nfs_free_delegation(delegation);
  149. return res;
  150. }
/* Sync all data to disk upon delegation return.
 *
 * Order matters: kick off writeback, flush all dirty NFS pages, then
 * wait for the writes to complete.
 */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}
/*
 * Basic procedure for returning a delegation to the server
 *
 * Lock ordering: clp->cl_sem (read) taken outside nfsi->rwsem
 * (write), with clp->cl_lock innermost.  The delegation is unhooked
 * from the client list and the inode under cl_lock, open/lock state
 * is reclaimed, and only then is DELEGRETURN sent outside all locks.
 */
int __nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	/* Flush dirty data before giving up the delegation */
	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		/* Detach from the per-client list and from the inode */
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	/* Re-establish opens and locks under ordinary stateids */
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	/* Write out anything dirtied while reclaiming */
	nfs_msync_inode(inode);
	if (delegation != NULL)
		res = nfs_do_return_delegation(inode, delegation);
	return res;
}
/*
 * Return all delegations associated to a super block
 *
 * Scans the per-client delegation list under cl_lock; for each
 * delegation belonging to @sb the inode is pinned with igrab(), the
 * lock is dropped for the return RPC, and the scan restarts from the
 * head (the list may have changed while unlocked).
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
	struct nfs_delegation *delegation;
	struct inode *inode;

	/* Not an NFSv4 mount: nothing to do */
	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		/* The inode may be going away: igrab() can fail */
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}
  212. int nfs_do_expire_all_delegations(void *ptr)
  213. {
  214. struct nfs4_client *clp = ptr;
  215. struct nfs_delegation *delegation;
  216. struct inode *inode;
  217. int err = 0;
  218. allow_signal(SIGKILL);
  219. restart:
  220. spin_lock(&clp->cl_lock);
  221. if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
  222. goto out;
  223. if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
  224. goto out;
  225. list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
  226. inode = igrab(delegation->inode);
  227. if (inode == NULL)
  228. continue;
  229. spin_unlock(&clp->cl_lock);
  230. err = nfs_inode_return_delegation(inode);
  231. iput(inode);
  232. if (!err)
  233. goto restart;
  234. }
  235. out:
  236. spin_unlock(&clp->cl_lock);
  237. nfs4_put_client(clp);
  238. module_put_and_exit(0);
  239. }
  240. void nfs_expire_all_delegations(struct nfs4_client *clp)
  241. {
  242. struct task_struct *task;
  243. __module_get(THIS_MODULE);
  244. atomic_inc(&clp->cl_count);
  245. task = kthread_run(nfs_do_expire_all_delegations, clp,
  246. "%u.%u.%u.%u-delegreturn",
  247. NIPQUAD(clp->cl_addr));
  248. if (!IS_ERR(task))
  249. return;
  250. nfs4_put_client(clp);
  251. module_put(THIS_MODULE);
  252. }
/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 *
 * Same restart-scan pattern as nfs_return_all_delegations(): pin the
 * inode under cl_lock, drop the lock for the return RPC, then rescan
 * from the head of the list.
 */
void nfs_handle_cb_pathdown(struct nfs4_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		/* The inode may be going away: igrab() can fail */
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}
/*
 * Arguments handed (on the caller's stack) to recall_thread().  The
 * caller blocks on @started until the thread has copied what it needs
 * and filled in @result; the struct must not be touched by the thread
 * after complete(&started).
 */
struct recall_threadargs {
	struct inode *inode;		/* inode whose delegation is being recalled */
	struct nfs4_client *clp;	/* NOTE(review): not set or read in this file -- confirm */
	const nfs4_stateid *stateid;	/* stateid named in the recall */
	struct completion started;	/* signalled once @result is valid */
	int result;			/* 0, or -ENOENT if no matching delegation */
};
/*
 * Kernel thread body for asynchronous delegation recall.
 *
 * Pins the inode, detaches the delegation matching args->stateid
 * under cl_lock, wakes the waiting caller via args->started, then
 * reclaims open/lock state and sends DELEGRETURN.  @args lives on the
 * caller's stack and must not be accessed after complete().  Never
 * returns (module_put_and_exit() calls do_exit()).
 */
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");
	/* Flush dirty data before giving up the delegation */
	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL && memcmp(delegation->stateid.data,
				args->stateid->data,
				sizeof(delegation->stateid.data)) == 0) {
		/* Stateid matches: unhook the delegation */
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
		args->result = 0;
	} else {
		/* No matching delegation to recall */
		delegation = NULL;
		args->result = -ENOENT;
	}
	spin_unlock(&clp->cl_lock);
	/* Wake the caller; args may vanish after this point */
	complete(&args->started);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);
	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}
  317. /*
  318. * Asynchronous delegation recall!
  319. */
  320. int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
  321. {
  322. struct recall_threadargs data = {
  323. .inode = inode,
  324. .stateid = stateid,
  325. };
  326. int status;
  327. init_completion(&data.started);
  328. __module_get(THIS_MODULE);
  329. status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
  330. if (status < 0)
  331. goto out_module_put;
  332. wait_for_completion(&data.started);
  333. return data.result;
  334. out_module_put:
  335. module_put(THIS_MODULE);
  336. return status;
  337. }
  338. /*
  339. * Retrieve the inode associated with a delegation
  340. */
  341. struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
  342. {
  343. struct nfs_delegation *delegation;
  344. struct inode *res = NULL;
  345. spin_lock(&clp->cl_lock);
  346. list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
  347. if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
  348. res = igrab(delegation->inode);
  349. break;
  350. }
  351. }
  352. spin_unlock(&clp->cl_lock);
  353. return res;
  354. }
  355. /*
  356. * Mark all delegations as needing to be reclaimed
  357. */
  358. void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
  359. {
  360. struct nfs_delegation *delegation;
  361. spin_lock(&clp->cl_lock);
  362. list_for_each_entry(delegation, &clp->cl_delegations, super_list)
  363. delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
  364. spin_unlock(&clp->cl_lock);
  365. }
  366. /*
  367. * Reap all unclaimed delegations after reboot recovery is done
  368. */
  369. void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
  370. {
  371. struct nfs_delegation *delegation, *n;
  372. LIST_HEAD(head);
  373. spin_lock(&clp->cl_lock);
  374. list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
  375. if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
  376. continue;
  377. list_move(&delegation->super_list, &head);
  378. NFS_I(delegation->inode)->delegation = NULL;
  379. NFS_I(delegation->inode)->delegation_state = 0;
  380. }
  381. spin_unlock(&clp->cl_lock);
  382. while(!list_empty(&head)) {
  383. delegation = list_entry(head.next, struct nfs_delegation, super_list);
  384. list_del(&delegation->super_list);
  385. nfs_free_delegation(delegation);
  386. }
  387. }