/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
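
/* Free the delegation structure itself */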
static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
	kfree(delegation);
}
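
/* RCU callback: free the delegation once the grace period has elapsed */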
static void nfs_free_delegation_callback(struct rcu_head *head)
{
	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

	nfs_do_free_delegation(delegation);
}
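
/* Drop the delegation's credential and schedule the structure for freeing via RCU */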
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	struct rpc_cred *cred;

	cred = rcu_dereference(delegation->cred);
	rcu_assign_pointer(delegation->cred, NULL);
	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
	if (cred)
		put_rpccred(cred);
}
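
/*
 * Reclaim the POSIX and flock locks held under this open context now that
 * the delegation covering them is being returned.
 */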
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file) != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
		default:
			printk(KERN_ERR "%s: unhandled error %d.\n",
					__FUNCTION__, status);
		case -NFS4ERR_EXPIRED:
			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
			goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
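
/*
 * Convert any open state that relied on this delegation back to regular
 * OPEN stateids (and reclaim the associated locks) before the delegation
 * is returned.
 */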
static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Update an inode's existing delegation after it has been reclaimed
 * from the server
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

	spin_lock(&clp->cl_lock);
	if (rcu_dereference(nfsi->delegation) == NULL) {
		list_add_rcu(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation_state = delegation->type;
		rcu_assign_pointer(nfsi->delegation, delegation);
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk(KERN_WARNING "%s: server %u.%u.%u.%u handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
			status = -EIO;
		}
	}
	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);
	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	return status;
}
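
/* Send DELEGRETURN to the server, then free the delegation */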
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	nfs_delegation_claim_opens(inode, &delegation->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	return nfs_do_return_delegation(inode, delegation);
}
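
/*
 * Unlink the delegation from the inode and from the client's list.
 * The caller must hold clp->cl_lock; returns NULL if there is no
 * delegation or if the stateid does not match.
 */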
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto nomatch;
	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
				sizeof(delegation->stateid.data)) != 0)
		goto nomatch;
	list_del_rcu(&delegation->super_list);
	nfsi->delegation_state = 0;
	rcu_assign_pointer(nfsi->delegation, NULL);
	return delegation;
nomatch:
	return NULL;
}
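
/* Return any delegation held on this inode to the server */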
int nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			err = __nfs_inode_return_delegation(inode, delegation);
	}
	return err;
}

/*
 * Return all delegations associated with a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}
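
/*
 * Kernel thread: return every delegation held by this client after its
 * lease has expired, unless state recovery is already in progress.
 */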
static int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
out:
	nfs_put_client(clp);
	module_put_and_exit(0);
}
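
/* Spawn a kernel thread to return all of this client's delegations */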
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs_do_expire_all_delegations, clp,
			"%u.%u.%u.%u-delegreturn",
			NIPQUAD(clp->cl_addr.sin_addr));
	if (!IS_ERR(task))
		return;
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}
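
/* Arguments handed to the delegation recall thread */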
struct recall_threadargs {
	struct inode *inode;
	struct nfs_client *clp;
	const nfs4_stateid *stateid;
	struct completion started;
	int result;
};
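
/*
 * Kernel thread handling a single delegation recall: detach the delegation,
 * reclaim the opens and locks it covered, then send DELEGRETURN.
 */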
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");
	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
	if (delegation != NULL)
		args->result = 0;
	else
		args->result = -ENOENT;
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode, args->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);
	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	rcu_read_unlock();
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	rcu_read_unlock();
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			nfs_free_delegation(delegation);
		goto restart;
	}
	rcu_read_unlock();
}
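
/*
 * Copy the delegation stateid for this inode into @dst.
 * Returns 1 if a delegation was found, 0 otherwise.
 */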
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int ret = 0;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}