/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

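/*
 * Drop the credential (if any) held by a delegation and free the
 * delegation structure itself.
 */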
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
        if (delegation->cred)
                put_rpccred(delegation->cred);
        kfree(delegation);
}

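/* RCU callback: free the delegation once all RCU readers have dropped it */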
static void nfs_free_delegation_callback(struct rcu_head *head)
{
        struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

        nfs_free_delegation(delegation);
}

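/*
 * Reclaim every POSIX or flock lock on this inode that belongs to the
 * given open context, now that the delegation covering them is going away.
 */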
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status;

        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
                        continue;
                status = nfs4_lock_delegation_recall(state, fl);
                if (status >= 0)
                        continue;
                switch (status) {
                default:
                        printk(KERN_ERR "%s: unhandled error %d.\n",
                                        __FUNCTION__, status);
                case -NFS4ERR_EXPIRED:
                        /* kill_proc(fl->fl_pid, SIGLOST, 1); */
                case -NFS4ERR_STALE_CLIENTID:
                        nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
                        goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

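/*
 * For each open context whose state is still marked as delegated under
 * the given stateid, ask the server for a real OPEN and then reclaim any
 * outstanding locks.
 */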
static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_open_context *ctx;
        struct nfs4_state *state;
        int err;

again:
        spin_lock(&inode->i_lock);
        list_for_each_entry(ctx, &nfsi->open_files, list) {
                state = ctx->state;
                if (state == NULL)
                        continue;
                if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
                        continue;
                if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
                        continue;
                get_nfs_open_context(ctx);
                spin_unlock(&inode->i_lock);
                err = nfs4_open_delegation_recall(ctx, state, stateid);
                if (err >= 0)
                        err = nfs_delegation_claim_locks(ctx, state);
                put_nfs_open_context(ctx);
                if (err != 0)
                        return;
                goto again;
        }
        spin_unlock(&inode->i_lock);
}

/*
 * Update an existing delegation on an inode once it has been reclaimed
 * from the server.
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
        struct nfs_delegation *delegation = NFS_I(inode)->delegation;

        if (delegation == NULL)
                return;
        memcpy(delegation->stateid.data, res->delegation.data,
                        sizeof(delegation->stateid.data));
        delegation->type = res->delegation_type;
        delegation->maxsize = res->maxsize;
        /* Release the credential held so far before taking the new one */
        put_rpccred(delegation->cred);
        delegation->cred = get_rpccred(cred);
        delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
        NFS_I(inode)->delegation_state = delegation->type;
        smp_wmb();
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int status = 0;

        /* Ensure we first revalidate the attributes and page cache! */
        if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
                __nfs_revalidate_inode(NFS_SERVER(inode), inode);

        delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
        if (delegation == NULL)
                return -ENOMEM;
        memcpy(delegation->stateid.data, res->delegation.data,
                        sizeof(delegation->stateid.data));
        delegation->type = res->delegation_type;
        delegation->maxsize = res->maxsize;
        delegation->change_attr = nfsi->change_attr;
        delegation->cred = get_rpccred(cred);
        delegation->inode = inode;

        spin_lock(&clp->cl_lock);
        if (rcu_dereference(nfsi->delegation) == NULL) {
                list_add_rcu(&delegation->super_list, &clp->cl_delegations);
                nfsi->delegation_state = delegation->type;
                rcu_assign_pointer(nfsi->delegation, delegation);
                delegation = NULL;
        } else {
                if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
                                        sizeof(delegation->stateid)) != 0 ||
                                delegation->type != nfsi->delegation->type) {
                        printk(KERN_WARNING "%s: server %u.%u.%u.%u handed out a duplicate delegation!\n",
                                        __FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
                        status = -EIO;
                }
        }
        spin_unlock(&clp->cl_lock);
        kfree(delegation);
        return status;
}

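/*
 * Send DELEGRETURN to the server and schedule the delegation structure
 * for RCU-deferred freeing.
 */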
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
        int res;

        res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
        call_rcu(&delegation->rcu, nfs_free_delegation_callback);
        return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
        filemap_fdatawrite(inode->i_mapping);
        nfs_wb_all(inode);
        filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);

        nfs_msync_inode(inode);
        down_read(&clp->cl_sem);
        /* Guard against new delegated open calls */
        down_write(&nfsi->rwsem);
        nfs_delegation_claim_opens(inode, &delegation->stateid);
        up_write(&nfsi->rwsem);
        up_read(&clp->cl_sem);
        nfs_msync_inode(inode);

        return nfs_do_return_delegation(inode, delegation);
}

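/*
 * Unlink a delegation from the inode and from the per-client list.
 * The caller must hold clp->cl_lock. Returns the detached delegation,
 * or NULL if there was none or the stateid did not match.
 */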
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
        struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

        if (delegation == NULL)
                goto nomatch;
        if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
                                sizeof(delegation->stateid.data)) != 0)
                goto nomatch;
        list_del_rcu(&delegation->super_list);
        nfsi->delegation_state = 0;
        rcu_assign_pointer(nfsi->delegation, NULL);
        return delegation;
nomatch:
        return NULL;
}

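/*
 * Detach and return any delegation currently held on this inode.
 */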
int nfs_inode_return_delegation(struct inode *inode)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int err = 0;

        if (rcu_dereference(nfsi->delegation) != NULL) {
                spin_lock(&clp->cl_lock);
                delegation = nfs_detach_delegation_locked(nfsi, NULL);
                spin_unlock(&clp->cl_lock);
                if (delegation != NULL)
                        err = __nfs_inode_return_delegation(inode, delegation);
        }
        return err;
}

/*
 * Return all delegations associated with a given superblock
 */
void nfs_return_all_delegations(struct super_block *sb)
{
        struct nfs_client *clp = NFS_SB(sb)->nfs_client;
        struct nfs_delegation *delegation;
        struct inode *inode;

        if (clp == NULL)
                return;
restart:
        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
                if (delegation->inode->i_sb != sb)
                        continue;
                inode = igrab(delegation->inode);
                if (inode == NULL)
                        continue;
                spin_lock(&clp->cl_lock);
                delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
                spin_unlock(&clp->cl_lock);
                rcu_read_unlock();
                if (delegation != NULL)
                        __nfs_inode_return_delegation(inode, delegation);
                iput(inode);
                goto restart;
        }
        rcu_read_unlock();
}

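/*
 * Kernel thread body: return every delegation held by the client while
 * its lease is marked as expired and no state recovery is in progress.
 */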
static int nfs_do_expire_all_delegations(void *ptr)
{
        struct nfs_client *clp = ptr;
        struct nfs_delegation *delegation;
        struct inode *inode;

        allow_signal(SIGKILL);
restart:
        if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
                goto out;
        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
                goto out;
        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
                inode = igrab(delegation->inode);
                if (inode == NULL)
                        continue;
                spin_lock(&clp->cl_lock);
                delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
                spin_unlock(&clp->cl_lock);
                rcu_read_unlock();
                if (delegation)
                        __nfs_inode_return_delegation(inode, delegation);
                iput(inode);
                goto restart;
        }
        rcu_read_unlock();
out:
        nfs_put_client(clp);
        module_put_and_exit(0);
}

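/*
 * Spawn a kthread to return all of the client's delegations after the
 * lease has expired.
 */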
void nfs_expire_all_delegations(struct nfs_client *clp)
{
        struct task_struct *task;

        __module_get(THIS_MODULE);
        atomic_inc(&clp->cl_count);
        task = kthread_run(nfs_do_expire_all_delegations, clp,
                        "%u.%u.%u.%u-delegreturn",
                        NIPQUAD(clp->cl_addr.sin_addr));
        if (!IS_ERR(task))
                return;
        nfs_put_client(clp);
        module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;
        struct inode *inode;

        if (clp == NULL)
                return;
restart:
        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
                inode = igrab(delegation->inode);
                if (inode == NULL)
                        continue;
                spin_lock(&clp->cl_lock);
                delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
                spin_unlock(&clp->cl_lock);
                rcu_read_unlock();
                if (delegation != NULL)
                        __nfs_inode_return_delegation(inode, delegation);
                iput(inode);
                goto restart;
        }
        rcu_read_unlock();
}

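/*
 * Arguments passed to the delegation recall thread; 'started' lets the
 * caller wait until the delegation has been detached, and 'result'
 * reports whether a matching delegation was found.
 */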
struct recall_threadargs {
        struct inode *inode;
        struct nfs_client *clp;
        const nfs4_stateid *stateid;
        struct completion started;
        int result;
};

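/*
 * Thread body for an asynchronous delegation recall: detach the delegation,
 * signal the waiting caller, reclaim opens and locks, then send DELEGRETURN.
 */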
static int recall_thread(void *data)
{
        struct recall_threadargs *args = (struct recall_threadargs *)data;
        struct inode *inode = igrab(args->inode);
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;

        daemonize("nfsv4-delegreturn");
        nfs_msync_inode(inode);
        down_read(&clp->cl_sem);
        down_write(&nfsi->rwsem);
        spin_lock(&clp->cl_lock);
        delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
        if (delegation != NULL)
                args->result = 0;
        else
                args->result = -ENOENT;
        spin_unlock(&clp->cl_lock);
        complete(&args->started);
        nfs_delegation_claim_opens(inode, args->stateid);
        up_write(&nfsi->rwsem);
        up_read(&clp->cl_sem);
        nfs_msync_inode(inode);
        if (delegation != NULL)
                nfs_do_return_delegation(inode, delegation);
        iput(inode);
        module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
        struct recall_threadargs data = {
                .inode = inode,
                .stateid = stateid,
        };
        int status;

        init_completion(&data.started);
        __module_get(THIS_MODULE);
        status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
        if (status < 0)
                goto out_module_put;
        wait_for_completion(&data.started);
        return data.result;
out_module_put:
        module_put(THIS_MODULE);
        return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
        struct nfs_delegation *delegation;
        struct inode *res = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
                if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
                        res = igrab(delegation->inode);
                        break;
                }
        }
        rcu_read_unlock();
        return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;

        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
                delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
        rcu_read_unlock();
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;

restart:
        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
                if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
                        continue;
                spin_lock(&clp->cl_lock);
                delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL);
                spin_unlock(&clp->cl_lock);
                rcu_read_unlock();
                if (delegation != NULL)
                        call_rcu(&delegation->rcu, nfs_free_delegation_callback);
                goto restart;
        }
        rcu_read_unlock();
}

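/*
 * Copy the stateid of any delegation held on this inode into 'dst'.
 * Returns 1 if a delegation was found, 0 otherwise.
 */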
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int ret = 0;

        rcu_read_lock();
        delegation = rcu_dereference(nfsi->delegation);
        if (delegation != NULL) {
                memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
                ret = 1;
        }
        rcu_read_unlock();
        return ret;
}