/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  NB: BSD uses a more intelligent approach to guessing when a request
 *  or reply has been lost by keeping the RTO estimate for each procedure.
 *  We currently make do with a constant timeout value.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>

#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

#define dprint_status(t)					\
	dprintk("RPC: %5u %s (status %d)\n", t->tk_pid,		\
			__FUNCTION__, t->tk_status)
static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static __be32	*call_header(struct rpc_task *task);
static __be32	*call_verify(struct rpc_task *task);
static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	clnt->cl_vfsmnt = ERR_PTR(-ENOENT);
	clnt->cl_dentry = ERR_PTR(-ENOENT);
	if (dir_name == NULL)
		return 0;

	clnt->cl_vfsmnt = rpc_get_mount();
	if (IS_ERR(clnt->cl_vfsmnt))
		return PTR_ERR(clnt->cl_vfsmnt);

	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			rpc_put_mount();
			return error;
		}
	}
}
static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, struct rpc_program *program, u32 vers, rpc_authflavor_t flavor)
{
	struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	struct rpc_auth *auth;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			program->name, servname, xprt);

	err = -EINVAL;
	if (!xprt)
		goto out_no_xprt;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != 0)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog = program->number;
	clnt->cl_vers = version->number;
	clnt->cl_stats = program->stats;
	clnt->cl_metrics = rpc_alloc_iostats(clnt);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program = program;

	if (!xprt_bound(clnt->cl_xprt))
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(flavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(utsname()->nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen);
	return clnt;

out_no_auth:
	if (!IS_ERR(clnt->cl_dentry)) {
		rpc_rmdir(clnt->cl_dentry);
		rpc_put_mount();
	}
out_no_path:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_put(xprt);
out_no_xprt:
	return ERR_PTR(err);
}
/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version. RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct rpc_clnt *clnt;

	xprt = xprt_create_transport(args->protocol, args->address,
			args->addrsize, args->timeout);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			args->program->name, args->servername, xprt);

	clnt = rpc_new_client(xprt, args->servername, args->program,
			args->version, args->authflavor);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
		clnt->cl_softrtry = 0;

	if (args->flags & RPC_CLNT_CREATE_INTR)
		clnt->cl_intr = 1;
	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_ONESHOT)
		clnt->cl_oneshot = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;

	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
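
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller fills in struct rpc_create_args with the fields used above and
 * checks the result with IS_ERR(). The server address, program table
 * ("example_program") and version below are placeholders, not real
 * kernel objects.
 *
 *	struct sockaddr_in sin = {
 *		.sin_family	= AF_INET,
 *		.sin_port	= htons(2049),
 *	};
 *	struct rpc_create_args args = {
 *		.protocol	= IPPROTO_UDP,
 *		.address	= (struct sockaddr *)&sin,
 *		.addrsize	= sizeof(sin),
 *		.servername	= "example-server",
 *		.program	= &example_program,
 *		.version	= 3,
 *		.authflavor	= RPC_AUTH_UNIX,
 *		.flags		= RPC_CLNT_CREATE_INTR,
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&args);
 *
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 */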
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;
	int err = -ENOMEM;

	new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_metrics = rpc_alloc_iostats(clnt);
	if (new->cl_metrics == NULL)
		goto out_no_stats;
	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
	if (err != 0)
		goto out_no_path;
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	new->cl_xprt = xprt_get(clnt->cl_xprt);
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	return new;
out_no_path:
	rpc_free_iostats(new->cl_metrics);
out_no_stats:
	kfree(new);
out_no_clnt:
	dprintk("RPC: %s: returned error %d\n", __FUNCTION__, err);
	return ERR_PTR(err);
}
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			!atomic_read(&clnt->cl_users), 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
#ifdef RPC_DEBUG
		rpc_show_tasks();
#endif
		BUG();
	}

	return rpc_destroy_client(clnt);
}

/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (!IS_ERR(clnt->cl_dentry)) {
		rpc_rmdir(clnt->cl_dentry);
		rpc_put_mount();
	}
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
out_free:
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	xprt_put(clnt->cl_xprt);
	kfree(clnt);
	return 0;
}

/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      int vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog = program->number;
	clnt->cl_vers = version->number;
	clnt->cl_stats = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
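
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that already holds an RPC client for one program can bind a second
 * program, such as an ACL side protocol, to the same transport. The
 * program table name ("example_acl_program") and version are placeholders.
 *
 *	struct rpc_clnt *acl_clnt;
 *
 *	acl_clnt = rpc_bind_new_program(nfs_clnt, &example_acl_program, 3);
 *	if (IS_ERR(acl_clnt))
 *		return PTR_ERR(acl_clnt);
 */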
/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/*
 * Export the signal mask handling for synchronous code that
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long sigallow = sigmask(SIGKILL);
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}

static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}

/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task *task;
	sigset_t oldset;
	int status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
	if (task == NULL)
		return -ENOMEM;

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	/* Set up the call info struct and execute the task */
	rpc_call_setup(task, msg, 0);
	if (task->tk_status == 0) {
		atomic_inc(&task->tk_count);
		rpc_execute(task);
	}
	status = task->tk_status;
	rpc_put_task(task);
	rpc_restore_sigmask(&oldset);
	return status;
}
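
/*
 * Example (illustrative sketch, not part of the original file): a
 * synchronous caller builds an rpc_message and blocks until the reply has
 * been decoded. "example_procedures[EXAMPLEPROC_READ]" and the argument
 * and result structures are placeholders for a real program's definitions.
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &example_procedures[EXAMPLEPROC_READ],
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
 *
 *	if (status < 0)
 *		return status;
 */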
/*
 * New rpc_call implementation
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task *task;
	sigset_t oldset;
	int status;

	/* If this client is slain all further I/O fails */
	status = -EIO;
	if (clnt->cl_dead)
		goto out_release;

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))
		goto out_release;

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		rpc_put_task(task);

	rpc_restore_sigmask(&oldset);
	return status;
out_release:
	rpc_release_calldata(tk_ops, data);
	return status;
}
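
/*
 * Example (illustrative sketch, not part of the original file): an
 * asynchronous caller supplies completion callbacks through rpc_call_ops
 * and returns immediately; rpciod drives the state machine and invokes
 * rpc_call_done once the reply has been decoded. The callback and the
 * message below are placeholders.
 *
 *	static void example_call_done(struct rpc_task *task, void *calldata)
 *	{
 *		if (task->tk_status < 0)
 *			printk(KERN_DEBUG "example call failed: %d\n",
 *					task->tk_status);
 *	}
 *
 *	static const struct rpc_call_ops example_call_ops = {
 *		.rpc_call_done = example_call_done,
 *	};
 *
 *	status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
 *				&example_call_ops, NULL);
 */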
void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = rpc_exit_task;
}
/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	size_t bytes;
	struct rpc_xprt *xprt = clnt->cl_xprt;

	bytes = sizeof(xprt->addr);
	if (bytes > bufsize)
		bytes = bufsize;
	memcpy(buf, &clnt->cl_xprt->addr, bytes);
	return xprt->addrlen;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);
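
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * copies the peer address into its own storage and compares the returned
 * length with the buffer it supplied to detect truncation.
 *
 *	struct sockaddr_storage peer;
 *	size_t len;
 *
 *	len = rpc_peeraddr(clnt, (struct sockaddr *)&peer, sizeof(peer));
 *	if (len > sizeof(peer))
 *		printk(KERN_DEBUG "peer address was truncated\n");
 */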
/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 */
char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;

	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
	else
		return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}

/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet. For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind)
		xprt_clear_bound(clnt->cl_xprt);
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}
/*
 * 0.	Initial state
 *
 *	Other FSM states can be visited zero or more times, but
 *	this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	dprintk("RPC: %5u call_start %s%d proc %d (%s)\n", task->tk_pid,
			clnt->cl_protname, clnt->cl_vers,
			task->tk_msg.rpc_proc->p_proc,
			(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}
/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprint_status(task);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_reserve(task);
}

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow. Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}
/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned int bufsiz;

	dprint_status(task);

	task->tk_action = call_bind;
	if (req->rq_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
		return;

	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

static inline int
rpc_task_need_encode(struct rpc_task *task)
{
	return task->tk_rqstp->rq_snd_buf.len == 0;
}

static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
}
/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int bufsiz;
	kxdrproc_t encode;
	__be32 *p;

	dprint_status(task);

	/* Default buffer setup */
	bufsiz = req->rq_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)req->rq_buffer;
	sndbuf->head[0].iov_len = bufsiz;
	sndbuf->tail[0].iov_len = 0;
	sndbuf->page_len = 0;
	sndbuf->len = 0;
	sndbuf->buflen = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz);
	rcvbuf->head[0].iov_len = bufsiz;
	rcvbuf->tail[0].iov_len = 0;
	rcvbuf->page_len = 0;
	rcvbuf->len = 0;
	rcvbuf->buflen = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode == NULL)
		return;

	lock_kernel();
	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
	unlock_kernel();
	if (task->tk_status == -ENOMEM) {
		/* XXX: Is this sane? */
		rpc_delay(task, 3*HZ);
		task->tk_status = -EAGAIN;
	}
}
/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprint_status(task);

	task->tk_action = call_connect;
	if (!xprt_bound(xprt)) {
		task->tk_action = call_bind_status;
		task->tk_timeout = xprt->bind_timeout;
		xprt->ops->rpcbind(task);
	}
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	switch (task->tk_status) {
	case -EACCES:
		dprintk("RPC: %5u remote rpcbind: RPC program/version "
				"unavailable\n", task->tk_pid);
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ETIMEDOUT:
		dprintk("RPC: %5u rpcbind request timed out\n",
				task->tk_pid);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		dprintk("RPC: %5u remote rpcbind service unavailable\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %5u remote rpcbind version 2 unavailable\n",
				task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
		status = -EIO;
	}

	rpc_exit(task, status);
	return;

retry_timeout:
	task->tk_action = call_timeout;
}
/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	rpc_force_rebind(clnt);

	switch (status) {
	case -ENOTCONN:
	case -EAGAIN:
		task->tk_action = call_bind;
		if (!RPC_IS_SOFT(task))
			return;
		/* if soft mounted, test if we've timed out */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		return;
	}
	rpc_exit(task, -EIO);
}
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	task->tk_action = call_transmit_status;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
		call_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0)
			return;
	}
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	/*
	 * On success, ensure that we call xprt_end_transmit() before sleeping
	 * in order to allow access to the socket to other RPC requests.
	 */
	call_transmit_status(task);
	if (task->tk_msg.rpc_proc->p_decode != NULL)
		return;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_task(task);
}

/*
 * 5a.	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;
	/*
	 * Special case: if we've been waiting on the socket's write_space()
	 * callback, then don't call xprt_end_transmit().
	 */
	if (task->tk_status == -EAGAIN)
		return;
	xprt_end_transmit(task);
	rpc_task_force_reencode(task);
}
/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	int status;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprint_status(task);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		rpc_force_rebind(clnt);
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		printk("%s: RPC call returned error %d\n",
				clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}
/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	if (RPC_IS_SOFT(task)) {
		printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
				clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
	__be32 *p;

	dprintk("RPC: %5u call_decode (status %d)\n",
			task->tk_pid, task->tk_status);

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		printk(KERN_NOTICE "%s: server %s OK\n",
				clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
				clnt->cl_protname, task->tk_status);
		task->tk_action = call_timeout;
		goto out_retry;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_received.
	 */
	smp_rmb();
	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	p = call_verify(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	if (decode) {
		lock_kernel();
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
				task->tk_msg.rpc_resp);
		unlock_kernel();
	}
	dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
			task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}
/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprint_status(task);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}
/*
 * Call header serialization
 */
static __be32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	__be32 *p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}
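
/*
 * For reference (illustrative note, not part of the original file): the
 * words marshalled above are the RFC 1831 call header, each a 32-bit
 * big-endian value; the credential and verifier that follow are appended
 * by rpcauth_marshcred():
 *
 *	xid | msg_type=CALL | rpcvers=2 | prog | vers | proc | cred | verf
 */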
/*
 * Reply header verification
 */
static __be32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	__be32 *p = iov->iov_base;
	u32 n;
	int error = -EACCES;

	if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
		/* RFC-1014 says that the representation of XDR data must be a
		 * multiple of four bytes
		 * - if it isn't pointer subtraction in the NFS client may give
		 *   undefined results
		 */
		printk(KERN_WARNING
		       "call_verify: XDR representation not a multiple of"
		       " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len);
		goto out_eio;
	}
	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("RPC: %5u %s: RPC call version "
					"mismatch!\n",
					task->tk_pid, __FUNCTION__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("RPC: %5u %s: RPC call rejected, "
					"unknown error: %x\n",
					task->tk_pid, __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %5u %s: retry stale creds\n",
					task->tk_pid, __FUNCTION__);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %5u %s: retry garbled creds\n",
					task->tk_pid, __FUNCTION__);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server %s requires stronger "
			       "authentication.\n", task->tk_client->cl_server);
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %5u %s: call rejected %d\n",
				task->tk_pid, __FUNCTION__, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_garbage;	/* bad verifier, retry */
	}
	len = p - (__be32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
				task->tk_pid, __FUNCTION__,
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: %5u %s: program %u, version %u unsupported by "
				"server %s\n", task->tk_pid, __FUNCTION__,
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: %5u %s: proc %p unsupported by program %u, "
				"version %u on server %s\n",
				task->tk_pid, __FUNCTION__,
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %5u %s: server saw garbage\n",
				task->tk_pid, __FUNCTION__);
		break;	/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_garbage:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC: %5u %s: retrying\n",
				task->tk_pid, __FUNCTION__);
		task->tk_action = call_bind;
out_retry:
		return ERR_PTR(-EAGAIN);
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return ERR_PTR(error);
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_garbage;
}
static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;

	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}
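
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * can probe whether the server answers the NULL procedure for the bound
 * program and version, mirroring the check rpc_create() performs when
 * RPC_CLNT_CREATE_NOPING is not set.
 *
 *	int err = rpc_ping(clnt, RPC_TASK_SOFT | RPC_TASK_NOINTR);
 *
 *	if (err != 0)
 *		printk(KERN_DEBUG "server did not answer NULL ping: %d\n", err);
 */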