/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"

#ifdef NFS_DEBUG
#define NFSDBG_FACILITY NFSDBG_CALLBACK
#endif
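
/*
 * CB_GETATTR: the server queries attributes that the client may be
 * caching under a write delegation. The handler below looks up the
 * delegated inode by filehandle and, if a write delegation is held,
 * reports the locally cached size and change attribute; the change
 * attribute is bumped by one while dirty pages are outstanding so the
 * server can tell that the file has been modified.
 */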
__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
			     struct cb_getattrres *res,
			     struct cb_process_state *cps)
{
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL)
		goto out;
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfsi->npages != 0)
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}
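
/*
 * CB_RECALL: the server asks the client to return a delegation. The
 * handler only locates the inode and hands the actual return off to a
 * helper (nfs_async_inode_return_delegation); an unknown filehandle
 * maps to NFS4ERR_BADHANDLE, an unknown stateid to NFS4ERR_BAD_STATEID,
 * and a failure to queue the return to NFS4ERR_RESOURCE.
 */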
__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
			    struct cb_process_state *cps)
{
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL)
		goto out;
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		if (res != 0)
			res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}
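
/*
 * v4.0 delegation stateids have no special structure on the client
 * side: a recall matches only if the presented stateid is identical,
 * byte for byte, to the one the client holds.
 */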
int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
{
	if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data,
				  sizeof(delegation->stateid.data)) != 0)
		return 0;
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout by filehandle.
 *
 * Note: gets a refcount on the layout hdr and on its respective inode.
 * Caller must put the layout hdr and the inode.
 *
 * TODO: keep track of all layouts (and delegations) in a hash table
 * hashed by filehandle.
 */
static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp, struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct inode *ino;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
				continue;
			ino = igrab(lo->plh_inode);
			if (!ino)
				continue;
			get_layout_hdr(lo);
			return lo;
		}
	}
	return NULL;
}

static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp, struct nfs_fh *fh)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	lo = get_layout_by_fh_locked(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return lo;
}
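
/*
 * Drain the layout for a single file: mark the layout segments that
 * match the recalled range invalid and free those no longer in use.
 * Segments still in use (or a bulk recall already in progress) are
 * answered with NFS4ERR_DELAY so the server retries; otherwise
 * NFS4ERR_NOMATCHING_LAYOUT tells the server nothing is left to
 * return.
 */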
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	lo = get_layout_by_fh(clp, &args->cbl_fh);
	if (!lo)
		return NFS4ERR_NOMATCHING_LAYOUT;

	ino = lo->plh_inode;
	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
	    mark_matching_lsegs_invalid(lo, &free_me_list,
					&args->cbl_range))
		rv = NFS4ERR_DELAY;
	else
		rv = NFS4ERR_NOMATCHING_LAYOUT;
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	put_layout_hdr(lo);
	iput(ino);
	return rv;
}
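
/*
 * Bulk draining (RETURN_FSID and RETURN_ALL) runs in two phases so
 * that no inode lock is taken while cl_lock and the RCU read lock are
 * held: first collect the matching layout headers on a private list,
 * taking a reference on each header and its inode, then walk that
 * list and invalidate segments under each inode's i_lock.
 */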
static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo;
	struct inode *ino;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	struct pnfs_layout_hdr *tmp;
	LIST_HEAD(recall_list);
	LIST_HEAD(free_me_list);
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if ((args->cbl_recall_type == RETURN_FSID) &&
		    memcmp(&server->fsid, &args->cbl_fsid,
			   sizeof(struct nfs_fsid)))
			continue;

		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (!igrab(lo->plh_inode))
				continue;
			get_layout_hdr(lo);
			BUG_ON(!list_empty(&lo->plh_bulk_recall));
			list_add(&lo->plh_bulk_recall, &recall_list);
		}
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	list_for_each_entry_safe(lo, tmp,
				 &recall_list, plh_bulk_recall) {
		ino = lo->plh_inode;
		spin_lock(&ino->i_lock);
		set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (mark_matching_lsegs_invalid(lo, &free_me_list, &range))
			rv = NFS4ERR_DELAY;
		list_del_init(&lo->plh_bulk_recall);
		spin_unlock(&ino->i_lock);
		pnfs_free_lseg_list(&free_me_list);
		put_layout_hdr(lo);
		iput(ino);
	}
	return rv;
}
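
/*
 * Only one layout recall is processed at a time: the
 * NFS4CLNT_LAYOUTRECALL bit serializes recalls, and any recall that
 * arrives while another is in progress is answered with NFS4ERR_DELAY.
 */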
static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	u32 res = NFS4ERR_DELAY;

	dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
	if (test_and_set_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state))
		goto out;
	if (args->cbl_recall_type == RETURN_FILE)
		res = initiate_file_draining(clp, args);
	else
		res = initiate_bulk_draining(clp, args);
	clear_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state);
out:
	dprintk("%s returning %i\n", __func__, res);
	return res;
}

__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	u32 res;

	dprintk("%s: -->\n", __func__);

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	else
		res = NFS4ERR_OP_NOT_IN_SESSION;

	dprintk("%s: exit with status = %d\n", __func__, res);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}
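
/*
 * CB_NOTIFY_DEVICEID: NOTIFY_DEVICEID4_CHANGE is not supported, so
 * both change and delete notifications are handled by dropping the
 * device ID from the cache; it will be fetched again on next use.
 */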
__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	dprintk("%s: -->\n", __func__);

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			dprintk("%s: layout type %u not found\n",
				__func__, dev->cbd_layout_type);
			continue;
		}

	found:
		if (dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE)
			dprintk("%s: NOTIFY_DEVICEID4_CHANGE not supported, "
				"deleting instead\n", __func__);
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	dprintk("%s: exit with status = %u\n",
		__func__, be32_to_cpu(res));
	return res;
}

int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
{
	if (delegation == NULL)
		return 0;

	if (stateid->stateid.seqid != 0)
		return 0;
	if (memcmp(&delegation->stateid.stateid.other,
		   &stateid->stateid.other,
		   NFS4_STATEID_OTHER_SIZE))
		return 0;

	return 1;
}
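
/*
 * Note the contrast with the v4.0 variant above: a v4.1 delegation
 * stateid is matched only on its "other" field, and a nonzero seqid
 * is rejected outright rather than compared.
 */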

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer guarantees
 * a single outstanding callback request at a time.
 */
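/*
 * An illustrative walk through the cases handled below: with
 * slot->seq_nr == 5, an incoming csa_sequenceid of 6 is the normal
 * case and advances the slot to 6, while 5 is a replay; with
 * slot->seq_nr == 0xffffffff, a csa_sequenceid of 1 is the wraparound
 * case and resets the slot to 1. Any other value is answered with
 * NFS4ERR_SEQ_MISORDERED.
 */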
static __be32
validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs *args)
{
	struct nfs4_slot *slot;

	dprintk("%s enter. slotid %d seqid %d\n",
		__func__, args->csa_slotid, args->csa_sequenceid);

	if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
		return htonl(NFS4ERR_BADSLOT);

	slot = tbl->slots + args->csa_slotid;
	dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);

	/* Normal */
	if (likely(args->csa_sequenceid == slot->seq_nr + 1)) {
		slot->seq_nr++;
		goto out_ok;
	}

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		dprintk("%s seqid %d is a replay\n",
			__func__, args->csa_sequenceid);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* The ca_maxresponsesize_cached is 0 with no DRC */
		else if (args->csa_cachethis == 1)
			return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
	}

	/* Wraparound */
	if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
		slot->seq_nr = 1;
		goto out_ok;
	}

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
out_ok:
	tbl->highest_used_slotid = args->csa_slotid;
	return htonl(NFS4_OK);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = false;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];

			dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
				"slotid %u\n", __func__,
				((u32 *)&rclist->rcl_sessionid.data)[0],
				((u32 *)&rclist->rcl_sessionid.data)[1],
				((u32 *)&rclist->rcl_sessionid.data)[2],
				((u32 *)&rclist->rcl_sessionid.data)[3],
				ref->rc_sequenceid, ref->rc_slotid);

			spin_lock(&tbl->slot_tbl_lock);
			status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
				  tbl->slots[ref->rc_slotid].seq_nr ==
					ref->rc_sequenceid);
			spin_unlock(&tbl->slot_tbl_lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}
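
/*
 * CB_SEQUENCE is the mandatory first operation of every NFSv4.1
 * callback compound. The handler locates the client by session ID,
 * refuses callbacks while the session is being drained or reset,
 * validates the slot/sequence pair, and delays any callback that
 * refers to a call whose reply this client has not yet processed.
 */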
__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
			      struct cb_sequenceres *res,
			      struct cb_process_state *cps)
{
	struct nfs4_slot_table *tbl;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid);
	if (clp == NULL)
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
		spin_unlock(&tbl->slot_tbl_lock);
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out;
	}

	status = validate_seqid(&clp->cl_session->bc_slot_table, args);
	spin_unlock(&tbl->slot_tbl_lock);
	if (status)
		goto out;

	cps->slotid = args->csa_slotid;

	/*
	 * Check for pending referring calls. If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out;
	}

	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;
	res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
	res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
		ntohl(status), ntohl(res->csr_status));
	return status;
}

static bool
validate_bitmap_values(unsigned long mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}
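
/*
 * CB_RECALL_ANY: craa_type_mask is a bitmap. For example, a mask with
 * both RCA4_TYPE_MASK_RDATA_DLG and RCA4_TYPE_MASK_WDATA_DLG set asks
 * the client to return read and write data delegations, and setting
 * RCA4_TYPE_MASK_FILE_LAYOUT additionally recalls all file layouts.
 */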
__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
			       struct cb_process_state *cps)
{
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags = FMODE_READ;
	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags |= FMODE_WRITE;
	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
		     &args->craa_type_mask))
		pnfs_recall_all_layouts(cps->clp);
	if (flags)
		nfs_expire_all_delegation_types(cps->clp, flags);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
				struct cb_process_state *cps)
{
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_max_slots);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4ERR_BAD_HIGH_SLOT);
	if (args->crsa_target_max_slots > fc_tbl->max_slots ||
	    args->crsa_target_max_slots < 1)
		goto out;

	status = htonl(NFS4_OK);
	if (args->crsa_target_max_slots == fc_tbl->max_slots)
		goto out;

	fc_tbl->target_max_slots = args->crsa_target_max_slots;
	nfs41_handle_recall_slot(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

#endif /* CONFIG_NFS_V4_1 */