nfs4callback.c
/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

#define NFSPROC4_CB_NULL	0
#define NFSPROC4_CB_COMPOUND	1

/* Index of predefined Linux callback client operations */

enum {
	NFSPROC4_CLNT_CB_NULL = 0,
	NFSPROC4_CLNT_CB_RECALL,
	NFSPROC4_CLNT_CB_SEQUENCE,
};

enum nfs_cb_opnum4 {
	OP_CB_RECALL		= 4,
	OP_CB_SEQUENCE		= 11,
};
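/*
 * The backchannel operation numbers above are assigned by the NFSv4
 * protocol: CB_RECALL (4) comes from RFC 3530, CB_SEQUENCE (11) from
 * RFC 5661.
 */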
#define NFS4_MAXTAGLEN		20

#define NFS4_enc_cb_null_sz		0
#define NFS4_dec_cb_null_sz		0
#define cb_compound_enc_hdr_sz		4
#define cb_compound_dec_hdr_sz		(3 + (NFS4_MAXTAGLEN >> 2))
#define sessionid_sz			(NFS4_MAX_SESSIONID_LEN >> 2)
#define cb_sequence_enc_sz		(sessionid_sz + 4 +             \
					1 /* no referring calls list yet */)
#define cb_sequence_dec_sz		(op_dec_sz + sessionid_sz + 4)

#define op_enc_sz			1
#define op_dec_sz			2
#define enc_nfs4_fh_sz			(1 + (NFS4_FHSIZE >> 2))
#define enc_stateid_sz			(NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz		(cb_compound_enc_hdr_sz +       \
					cb_sequence_enc_sz +            \
					1 + enc_stateid_sz +            \
					enc_nfs4_fh_sz)

#define NFS4_dec_cb_recall_sz		(cb_compound_dec_hdr_sz +       \
					cb_sequence_dec_sz +            \
					op_dec_sz)
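/*
 * The *_sz estimates above are expressed in 32-bit XDR words (4 bytes
 * each); they bound the encoded size of each callback request and the
 * expected size of its reply, and are used to size the RPC buffers via
 * the p_arglen/p_replen fields of the procedure table below.
 */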
/*
 * Generic encode routines from fs/nfs/nfs4xdr.c
 */
static inline __be32 *
xdr_writemem(__be32 *p, const void *ptr, int nbytes)
{
	int tmp = XDR_QUADLEN(nbytes);

	if (!tmp)
		return p;
	p[tmp-1] = 0;
	memcpy(p, ptr, nbytes);
	return p + tmp;
}

#define WRITE32(n)		*p++ = htonl(n)
#define WRITEMEM(ptr,nbytes)	do {				\
	p = xdr_writemem(p, ptr, nbytes);			\
} while (0)
#define RESERVE_SPACE(nbytes)	do {				\
	p = xdr_reserve_space(xdr, nbytes);			\
	if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
	BUG_ON(!p);						\
} while (0)
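/*
 * Note: RESERVE_SPACE() takes space from the preallocated send buffer;
 * if the *_enc_sz estimate for a procedure is too small, the
 * xdr_reserve_space() call fails and the BUG_ON() fires, so the size
 * macros above must stay in sync with what the encoders emit.
 */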
/*
 * Generic decode routines from fs/nfs/nfs4xdr.c
 */
#define DECODE_TAIL				\
	status = 0;				\
out:						\
	return status;				\
xdr_error:					\
	dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
	status = -EIO;				\
	goto out

#define READ32(x)	(x) = ntohl(*p++)
#define READ64(x)	do {			\
	(x) = (u64)ntohl(*p++) << 32;		\
	(x) |= ntohl(*p++);			\
} while (0)
#define READTIME(x)	do {			\
	p++;					\
	(x.tv_sec) = ntohl(*p++);		\
	(x.tv_nsec) = ntohl(*p++);		\
} while (0)
#define READ_BUF(nbytes)	do { \
	p = xdr_inline_decode(xdr, nbytes); \
	if (!p) { \
		dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
			__func__, __LINE__); \
		return -EIO; \
	} \
} while (0)

struct nfs4_cb_compound_hdr {
	/* args */
	u32		ident;	/* minorversion 0 only */
	u32		nops;
	__be32		*nops_p;
	u32		minorversion;
	/* res */
	int		status;
};

static struct {
	int stat;
	int errno;
} nfs_cb_errtbl[] = {
	{ NFS4_OK,		0		},
	{ NFS4ERR_PERM,		EPERM		},
	{ NFS4ERR_NOENT,	ENOENT		},
	{ NFS4ERR_IO,		EIO		},
	{ NFS4ERR_NXIO,		ENXIO		},
	{ NFS4ERR_ACCESS,	EACCES		},
	{ NFS4ERR_EXIST,	EEXIST		},
	{ NFS4ERR_XDEV,		EXDEV		},
	{ NFS4ERR_NOTDIR,	ENOTDIR		},
	{ NFS4ERR_ISDIR,	EISDIR		},
	{ NFS4ERR_INVAL,	EINVAL		},
	{ NFS4ERR_FBIG,		EFBIG		},
	{ NFS4ERR_NOSPC,	ENOSPC		},
	{ NFS4ERR_ROFS,		EROFS		},
	{ NFS4ERR_MLINK,	EMLINK		},
	{ NFS4ERR_NAMETOOLONG,	ENAMETOOLONG	},
	{ NFS4ERR_NOTEMPTY,	ENOTEMPTY	},
	{ NFS4ERR_DQUOT,	EDQUOT		},
	{ NFS4ERR_STALE,	ESTALE		},
	{ NFS4ERR_BADHANDLE,	EBADHANDLE	},
	{ NFS4ERR_BAD_COOKIE,	EBADCOOKIE	},
	{ NFS4ERR_NOTSUPP,	ENOTSUPP	},
	{ NFS4ERR_TOOSMALL,	ETOOSMALL	},
	{ NFS4ERR_SERVERFAULT,	ESERVERFAULT	},
	{ NFS4ERR_BADTYPE,	EBADTYPE	},
	{ NFS4ERR_LOCKED,	EAGAIN		},
	{ NFS4ERR_RESOURCE,	EREMOTEIO	},
	{ NFS4ERR_SYMLINK,	ELOOP		},
	{ NFS4ERR_OP_ILLEGAL,	EOPNOTSUPP	},
	{ NFS4ERR_DEADLOCK,	EDEADLK		},
	{ -1,			EIO		}
};

static int
nfs_cb_stat_to_errno(int stat)
{
	int i;

	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
		if (nfs_cb_errtbl[i].stat == stat)
			return nfs_cb_errtbl[i].errno;
	}
	/* If we cannot translate the error, the recovery routines should
	 * handle it.
	 * Note: remaining NFSv4 error codes have values > 10000, so should
	 * not conflict with native Linux error codes.
	 */
	return stat;
}

/*
 * XDR encode
 */

static void
encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
{
	__be32 *p;

	RESERVE_SPACE(sizeof(stateid_t));
	WRITE32(sid->si_generation);
	WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
}

static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	RESERVE_SPACE(16);
	WRITE32(0);		/* tag length is always 0 */
	WRITE32(hdr->minorversion);
	WRITE32(hdr->ident);
	hdr->nops_p = p;
	WRITE32(hdr->nops);
}

static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
	*hdr->nops_p = htonl(hdr->nops);
}
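/*
 * The operation count in the compound header is not known until every
 * operation has been encoded, so encode_cb_compound_hdr() remembers the
 * location of the nops word and encode_cb_nops() back-patches it once
 * encoding is complete.
 */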
static void
encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
		struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	int len = dp->dl_fh.fh_size;

	RESERVE_SPACE(4);
	WRITE32(OP_CB_RECALL);
	encode_stateid(xdr, &dp->dl_stateid);
	RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
	WRITE32(0);		/* truncate optimization not implemented */
	WRITE32(len);
	WRITEMEM(&dp->dl_fh.fh_base, len);
	hdr->nops++;
}

static void
encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
		   struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;

	if (hdr->minorversion == 0)
		return;

	RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);

	WRITE32(OP_CB_SEQUENCE);
	WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN);
	WRITE32(ses->se_cb_seq_nr);
	WRITE32(0);		/* slotid, always 0 */
	WRITE32(0);		/* highest slotid always 0 */
	WRITE32(0);		/* cachethis always 0 */
	WRITE32(0);		/* FIXME: support referring_call_lists */
	hdr->nops++;
}
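/*
 * CB_SEQUENCE exists only in NFSv4.1; for minorversion 0 the encoder
 * above is a no-op.  When it is emitted, it must be the first operation
 * in the callback compound (see RFC 5661, section 20.9).
 */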
static int
nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
{
	struct xdr_stream xdrs, *xdr = &xdrs;

	xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
	RESERVE_SPACE(0);
	return 0;
}

static int
nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
		struct nfsd4_callback *cb)
{
	struct xdr_stream xdr;
	struct nfs4_delegation *args = cb->cb_op;
	struct nfs4_cb_compound_hdr hdr = {
		.ident = cb->cb_clp->cl_cb_ident,
		.minorversion = cb->cb_minorversion,
	};

	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
	encode_cb_compound_hdr(&xdr, &hdr);
	encode_cb_sequence(&xdr, cb, &hdr);
	encode_cb_recall(&xdr, args, &hdr);
	encode_cb_nops(&hdr);
	return 0;
}

static int
decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	u32 taglen;

	READ_BUF(8);
	READ32(hdr->status);
	/* We've got no use for the tag; ignore it: */
	READ32(taglen);
	READ_BUF(taglen + 4);
	p += XDR_QUADLEN(taglen);
	READ32(hdr->nops);
	return 0;
}

static int
decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
{
	__be32 *p;
	u32 op;
	int32_t nfserr;

	READ_BUF(8);
	READ32(op);
	if (op != expected) {
		dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
			"operation %d but we issued a request for %d\n",
			op, expected);
		return -EIO;
	}
	READ32(nfserr);
	if (nfserr != NFS_OK)
		return -nfs_cb_stat_to_errno(nfserr);
	return 0;
}
/*
 * Our current back channel implementation supports a single backchannel
 * with a single slot.
 */
static int
decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
		   struct rpc_rqst *rqstp)
{
	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
	struct nfs4_sessionid id;
	int status;
	u32 dummy;
	__be32 *p;

	if (cb->cb_minorversion == 0)
		return 0;

	status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
	if (status)
		return status;

	/*
	 * If the server returns different values for sessionID, slotID or
	 * sequence number, the server is looney tunes.
	 */
	status = -ESERVERFAULT;

	READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
	memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
	if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
		dprintk("%s Invalid session id\n", __func__);
		goto out;
	}
	READ32(dummy);
	if (dummy != ses->se_cb_seq_nr) {
		dprintk("%s Invalid sequence number\n", __func__);
		goto out;
	}
	READ32(dummy);	/* slotid must be 0 */
	if (dummy != 0) {
		dprintk("%s Invalid slotid\n", __func__);
		goto out;
	}
	/* FIXME: process highest slotid and target highest slotid */
	status = 0;
out:
	return status;
}

static int
nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
{
	return 0;
}

static int
nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
		struct nfsd4_callback *cb)
{
	struct xdr_stream xdr;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	status = decode_cb_compound_hdr(&xdr, &hdr);
	if (status)
		goto out;
	if (cb) {
		status = decode_cb_sequence(&xdr, cb, rqstp);
		if (status)
			goto out;
	}
	status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
out:
	return status;
}

/*
 * RPC procedure tables
 */
#define PROC(proc, call, argtype, restype)				\
[NFSPROC4_CLNT_##proc] = {						\
	.p_proc    = NFSPROC4_CB_##call,				\
	.p_encode  = (kxdrproc_t) nfs4_xdr_##argtype,			\
	.p_decode  = (kxdrproc_t) nfs4_xdr_##restype,			\
	.p_arglen  = NFS4_##argtype##_sz,				\
	.p_replen  = NFS4_##restype##_sz,				\
	.p_statidx = NFSPROC4_CB_##call,				\
	.p_name    = #proc,						\
}

static struct rpc_procinfo nfs4_cb_procedures[] = {
	PROC(CB_NULL,	NULL,		enc_cb_null,	dec_cb_null),
	PROC(CB_RECALL,	COMPOUND,	enc_cb_recall,	dec_cb_recall),
};

static struct rpc_version nfs_cb_version4 = {
/*
 * Note on the callback rpc program version number: despite language in rfc
 * 5661 section 18.36.3 requiring servers to use 4 in this field, the
 * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
 * in practice that appears to be what implementations use.  The section
 * 18.36.3 language is expected to be fixed in an erratum.
 */
	.number			= 1,
	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
	.procs			= nfs4_cb_procedures
};

static struct rpc_version *nfs_cb_version[] = {
	&nfs_cb_version4,
};

static struct rpc_program cb_program;

static struct rpc_stat cb_stats = {
	.program		= &cb_program
};

#define NFS4_CALLBACK 0x40000000
static struct rpc_program cb_program = {
	.name			= "nfs4_cb",
	.number			= NFS4_CALLBACK,
	.nrvers			= ARRAY_SIZE(nfs_cb_version),
	.version		= nfs_cb_version,
	.stats			= &cb_stats,
	.pipe_dir_name		= "/nfsd4_cb",
};

static int max_cb_time(void)
{
	return max(nfsd4_lease/10, (time_t)1) * HZ;
}
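/*
 * Callback RPCs get a total timeout of one tenth of the lease period
 * (but at least one second), expressed in jiffies; with to_retries = 0
 * there are no transport-level retransmissions beyond that.
 */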
/* Reference counting, callback cleanup, etc., all look racy as heck.
 * And why is cl_cb_set an atomic? */

int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
	struct rpc_timeout	timeparms = {
		.to_initval	= max_cb_time(),
		.to_retries	= 0,
	};
	struct rpc_create_args args = {
		.net		= &init_net,
		.address	= (struct sockaddr *) &conn->cb_addr,
		.addrsize	= conn->cb_addrlen,
		.timeout	= &timeparms,
		.program	= &cb_program,
		.version	= 0,
		.authflavor	= clp->cl_flavor,
		.flags		= (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
	};
	struct rpc_clnt *client;

	if (clp->cl_minorversion == 0) {
		if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
			return -EINVAL;
		args.client_name = clp->cl_principal;
		args.prognumber = conn->cb_prog;
		args.protocol = XPRT_TRANSPORT_TCP;
		clp->cl_cb_ident = conn->cb_ident;
	} else {
		args.bc_xprt = conn->cb_xprt;
		args.prognumber = clp->cl_cb_session->se_cb_prog;
		args.protocol = XPRT_TRANSPORT_BC_TCP;
	}
	/* Create RPC client */
	client = rpc_create(&args);
	if (IS_ERR(client)) {
		dprintk("NFSD: couldn't create callback client: %ld\n",
			PTR_ERR(client));
		return PTR_ERR(client);
	}
	clp->cl_cb_client = client;
	return 0;
}

static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
		(int)clp->cl_name.len, clp->cl_name.data, reason);
}

static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);

	if (task->tk_status)
		warn_no_callback_path(clp, task->tk_status);
	else
		atomic_set(&clp->cl_cb_set, 1);
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
	/* XXX: release method to ensure we set the cb channel down if
	 * necessary on early failure? */
	.rpc_call_done = nfsd4_cb_probe_done,
};

static struct rpc_cred *callback_cred;

int set_callback_cred(void)
{
	if (callback_cred)
		return 0;
	callback_cred = rpc_lookup_machine_cred();
	if (!callback_cred)
		return -ENOMEM;
	return 0;
}
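/*
 * The machine credential looked up above is created once and then shared
 * by every callback RPC the server sends, for all clients.
 */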
static struct workqueue_struct *callback_wq;

static void do_probe_callback(struct nfs4_client *clp)
{
	struct nfsd4_callback *cb = &clp->cl_cb_null;

	cb->cb_op = NULL;
	cb->cb_clp = clp;

	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL];
	cb->cb_msg.rpc_argp = NULL;
	cb->cb_msg.rpc_resp = NULL;
	cb->cb_msg.rpc_cred = callback_cred;

	cb->cb_ops = &nfsd4_cb_probe_ops;

	queue_work(callback_wq, &cb->cb_work);
}

/*
 * Poke the callback thread to process any updates to the callback
 * parameters, and send a null probe.
 */
void nfsd4_probe_callback(struct nfs4_client *clp)
{
	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
	do_probe_callback(clp);
}

void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
	BUG_ON(atomic_read(&clp->cl_cb_set));

	spin_lock(&clp->cl_lock);
	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
	spin_unlock(&clp->cl_lock);
}

/*
 * There's currently a single callback channel slot.
 * If the slot is available, then mark it busy.  Otherwise, set the
 * thread for sleeping on the callback RPC wait queue.
 */
static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
		struct rpc_task *task)
{
	u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data;
	int status = 0;

	dprintk("%s: %u:%u:%u:%u\n", __func__,
		ptr[0], ptr[1], ptr[2], ptr[3]);

	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
		dprintk("%s slot is busy\n", __func__);
		status = -EAGAIN;
		goto out;
	}
out:
	dprintk("%s status=%d\n", __func__, status);
	return status;
}
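/*
 * A return of -EAGAIN means the task has already been put to sleep on
 * cl_cb_waitq by rpc_sleep_on(); it is woken and retried when
 * nfsd4_cb_done() frees the slot.
 */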
/*
 * TODO: cb_sequence should support referring call lists, cachethis, multiple
 * slots, and mark callback channel down on communication errors.
 */
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;
	u32 minorversion = clp->cl_minorversion;
	int status = 0;

	cb->cb_minorversion = minorversion;
	if (minorversion) {
		status = nfsd41_cb_setup_sequence(clp, task);
		if (status) {
			if (status != -EAGAIN) {
				/* terminate rpc task */
				task->tk_status = status;
				task->tk_action = NULL;
			}
			return;
		}
	}
	rpc_call_start(task);
}

static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;

	dprintk("%s: minorversion=%d\n", __func__,
		clp->cl_minorversion);

	if (clp->cl_minorversion) {
		/* No need for lock, access serialized in nfsd4_cb_prepare */
		++clp->cl_cb_session->se_cb_seq_nr;
		clear_bit(0, &clp->cl_cb_slot_busy);
		rpc_wake_up_next(&clp->cl_cb_waitq);
		dprintk("%s: freed slot, new seqid=%d\n", __func__,
			clp->cl_cb_session->se_cb_seq_nr);

		/* We're done looking into the sequence information */
		task->tk_msg.rpc_resp = NULL;
	}
}

static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;
	struct rpc_clnt *current_rpc_client = clp->cl_cb_client;

	nfsd4_cb_done(task, calldata);

	if (current_rpc_client == NULL) {
		/* We're shutting down; give up. */
		/* XXX: err, or is it ok just to fall through
		 * and rpc_restart_call? */
		return;
	}

	switch (task->tk_status) {
	case 0:
		return;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/* Race: client probably got cb_recall
		 * before open reply granting delegation */
		break;
	default:
		/* Network partition? */
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
		if (current_rpc_client != task->tk_client) {
			/* queue a callback on the new connection: */
			atomic_inc(&dp->dl_count);
			nfsd4_cb_recall(dp);
			return;
		}
	}
	if (dp->dl_retries--) {
		rpc_delay(task, 2*HZ);
		task->tk_status = 0;
		rpc_restart_call_prepare(task);
		return;
	} else {
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
	}
}
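/*
 * With dl_retries initialized to 1 in nfsd4_cb_recall(), a failed recall
 * is retried once, after a two-second delay, before the callback path is
 * marked down.
 */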
static void nfsd4_cb_recall_release(void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);

	nfs4_put_delegation(dp);
}

static const struct rpc_call_ops nfsd4_cb_recall_ops = {
	.rpc_call_prepare = nfsd4_cb_prepare,
	.rpc_call_done = nfsd4_cb_recall_done,
	.rpc_release = nfsd4_cb_recall_release,
};

int nfsd4_create_callback_queue(void)
{
	callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
	if (!callback_wq)
		return -ENOMEM;
	return 0;
}

void nfsd4_destroy_callback_queue(void)
{
	destroy_workqueue(callback_wq);
}

/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
	set_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags);
	/*
	 * Note this won't actually result in a null callback;
	 * instead, nfsd4_do_callback_rpc() will detect the killed
	 * client, destroy the rpc client, and stop:
	 */
	do_probe_callback(clp);
	flush_workqueue(callback_wq);
}

void nfsd4_release_cb(struct nfsd4_callback *cb)
{
	if (cb->cb_ops->rpc_release)
		cb->cb_ops->rpc_release(cb);
}

void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
	struct nfs4_cb_conn conn;
	struct nfs4_client *clp = cb->cb_clp;
	int err;

	/*
	 * This is either an update, or the client dying; in either case,
	 * kill the old client:
	 */
	if (clp->cl_cb_client) {
		rpc_shutdown_client(clp->cl_cb_client);
		clp->cl_cb_client = NULL;
	}
	if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
		return;
	spin_lock(&clp->cl_lock);
	/*
	 * Only serialized callback code is allowed to clear these
	 * flags; main nfsd code can only set them:
	 */
	BUG_ON(!clp->cl_cb_flags);
	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
	spin_unlock(&clp->cl_lock);

	err = setup_callback_client(clp, &conn);
	if (err)
		warn_no_callback_path(clp, err);
}

void nfsd4_do_callback_rpc(struct work_struct *w)
{
	struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, cb_work);
	struct nfs4_client *clp = cb->cb_clp;
	struct rpc_clnt *clnt;

	if (clp->cl_cb_flags)
		nfsd4_process_cb_update(cb);

	clnt = clp->cl_cb_client;
	if (!clnt) {
		/* Callback channel broken, or client killed; give up: */
		nfsd4_release_cb(cb);
		return;
	}
	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
			cb->cb_ops, cb);
}
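/*
 * RPC_TASK_SOFT and RPC_TASK_SOFTCONN make the asynchronous callback
 * fail with an error rather than retry indefinitely when the client is
 * unreachable or the connection cannot be established.
 */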
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
	struct nfsd4_callback *cb = &dp->dl_recall;

	dp->dl_retries = 1;
	cb->cb_op = dp;
	cb->cb_clp = dp->dl_client;
	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
	cb->cb_msg.rpc_argp = cb;
	cb->cb_msg.rpc_resp = cb;
	cb->cb_msg.rpc_cred = callback_cred;

	cb->cb_ops = &nfsd4_cb_recall_ops;

	queue_work(callback_wq, &dp->dl_recall.cb_work);
}