/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>

#define RPCDBG_FACILITY RPCDBG_SVCDSP
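
/*
 * A serv is "pooled" iff it was created via svc_create_pooled(),
 * which is the only path that sets sv_function.
 */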
#define svc_serv_is_pooled(serv)    ((serv)->sv_function)

/*
 * Mode for mapping cpus to pools.
 */
enum {
        SVC_POOL_AUTO = -1,     /* choose one of the others */
        SVC_POOL_GLOBAL,        /* no mapping, just a single global pool
                                 * (legacy & UP mode) */
        SVC_POOL_PERCPU,        /* one pool per cpu */
        SVC_POOL_PERNODE        /* one pool per numa node */
};
#define SVC_POOL_DEFAULT        SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
        int count;              /* How many svc_servs use us */
        int mode;               /* Note: int not enum to avoid
                                 * warnings about "enumeration value
                                 * not handled in switch" */
        unsigned int npools;
        unsigned int *pool_to;  /* maps pool id to cpu or node */
        unsigned int *to_pool;  /* maps cpu or node to pool id */
} svc_pool_map = {
        .count = 0,
        .mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */

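/*
 * Handler for the pool_mode module parameter.  The mode can only be
 * changed while no svc_serv holds a reference on the map, i.e. while
 * all pooled services are shut down.
 */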
static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;
        struct svc_pool_map *m = &svc_pool_map;
        int err;

        mutex_lock(&svc_pool_map_mutex);

        err = -EBUSY;
        if (m->count)
                goto out;

        err = 0;
        if (!strncmp(val, "auto", 4))
                *ip = SVC_POOL_AUTO;
        else if (!strncmp(val, "global", 6))
                *ip = SVC_POOL_GLOBAL;
        else if (!strncmp(val, "percpu", 6))
                *ip = SVC_POOL_PERCPU;
        else if (!strncmp(val, "pernode", 7))
                *ip = SVC_POOL_PERNODE;
        else
                err = -EINVAL;

out:
        mutex_unlock(&svc_pool_map_mutex);
        return err;
}

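/*
 * Report the current pool mode as a string, falling back to the raw
 * numeric value for modes we don't recognise.
 */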
static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;

        switch (*ip)
        {
        case SVC_POOL_AUTO:
                return strlcpy(buf, "auto", 20);
        case SVC_POOL_GLOBAL:
                return strlcpy(buf, "global", 20);
        case SVC_POOL_PERCPU:
                return strlcpy(buf, "percpu", 20);
        case SVC_POOL_PERNODE:
                return strlcpy(buf, "pernode", 20);
        default:
                return sprintf(buf, "%d", *ip);
        }
}

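/*
 * Expose pool_mode as a module parameter, using the handlers above to
 * parse and print it (mode 0644: world-readable, root-writable).
 */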
module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
                  &svc_pool_map.mode, 0644);

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
        unsigned int node;

        if (num_online_nodes() > 1) {
                /*
                 * Actually have multiple NUMA nodes,
                 * so split pools on NUMA node boundaries
                 */
                return SVC_POOL_PERNODE;
        }

        node = any_online_node(node_online_map);
        if (nr_cpus_node(node) > 2) {
                /*
                 * Non-trivial SMP, or CONFIG_NUMA on
                 * non-NUMA hardware, e.g. with a generic
                 * x86_64 kernel on Xeons.  In this case we
                 * want to divide the pools on cpu boundaries.
                 */
                return SVC_POOL_PERCPU;
        }

        /* default: one global pool */
        return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
        m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->to_pool)
                goto fail;
        m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->pool_to)
                goto fail_free;

        return 0;

fail_free:
        kfree(m->to_pool);
fail:
        return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_cpu_ids;
        unsigned int pidx = 0;
        unsigned int cpu;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_online_cpu(cpu) {
                BUG_ON(pidx > maxpools);
                m->to_pool[cpu] = pidx;
                m->pool_to[pidx] = cpu;
                pidx++;
        }
        /* cpus brought online later all get mapped to pool0, sorry */

        return pidx;
}

/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_node_ids;
        unsigned int pidx = 0;
        unsigned int node;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_node_with_cpus(node) {
                /* some architectures (e.g. SN2) have cpuless nodes */
                BUG_ON(pidx > maxpools);
                m->to_pool[node] = pidx;
                m->pool_to[pidx] = node;
                pidx++;
        }
        /* nodes brought online later all get mapped to pool0, sorry */

        return pidx;
}

/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
        struct svc_pool_map *m = &svc_pool_map;
        int npools = -1;

        mutex_lock(&svc_pool_map_mutex);

        if (m->count++) {
                mutex_unlock(&svc_pool_map_mutex);
                return m->npools;
        }

        if (m->mode == SVC_POOL_AUTO)
                m->mode = svc_pool_map_choose_mode();

        switch (m->mode) {
        case SVC_POOL_PERCPU:
                npools = svc_pool_map_init_percpu(m);
                break;
        case SVC_POOL_PERNODE:
                npools = svc_pool_map_init_pernode(m);
                break;
        }

        if (npools < 0) {
                /* default, or memory allocation failure */
                npools = 1;
                m->mode = SVC_POOL_GLOBAL;
        }
        m->npools = npools;

        mutex_unlock(&svc_pool_map_mutex);
        return m->npools;
}

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
        struct svc_pool_map *m = &svc_pool_map;

        mutex_lock(&svc_pool_map_mutex);

        if (!--m->count) {
                m->mode = SVC_POOL_DEFAULT;
                kfree(m->to_pool);
                kfree(m->pool_to);
                m->npools = 0;
        }

        mutex_unlock(&svc_pool_map_mutex);
}

/*
 * Set the current thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 *
 * Returns 1 and fills in oldmask iff a cpumask was applied.
 */
static inline int
svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
{
        struct svc_pool_map *m = &svc_pool_map;

        /*
         * The caller checks for sv_nrpools > 1, which
         * implies that we've been initialized.
         */
        BUG_ON(m->count == 0);

        switch (m->mode)
        {
        default:
                return 0;
        case SVC_POOL_PERCPU:
        {
                unsigned int cpu = m->pool_to[pidx];

                *oldmask = current->cpus_allowed;
                set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
                return 1;
        }
        case SVC_POOL_PERNODE:
        {
                unsigned int node = m->pool_to[pidx];
                node_to_cpumask_ptr(nodecpumask, node);

                *oldmask = current->cpus_allowed;
                set_cpus_allowed_ptr(current, nodecpumask);
                return 1;
        }
        }
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
        struct svc_pool_map *m = &svc_pool_map;
        unsigned int pidx = 0;

        /*
         * An uninitialised map happens in a pure client when
         * lockd is brought up, so silently treat it the
         * same as SVC_POOL_GLOBAL.
         */
        if (svc_serv_is_pooled(serv)) {
                switch (m->mode) {
                case SVC_POOL_PERCPU:
                        pidx = m->to_pool[cpu];
                        break;
                case SVC_POOL_PERNODE:
                        pidx = m->to_pool[cpu_to_node(cpu)];
                        break;
                }
        }
        return &serv->sv_pools[pidx % serv->sv_nrpools];
}

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
             void (*shutdown)(struct svc_serv *serv))
{
        struct svc_serv *serv;
        unsigned int vers;
        unsigned int xdrsize;
        unsigned int i;

        if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
                return NULL;
        serv->sv_name = prog->pg_name;
        serv->sv_program = prog;
        serv->sv_nrthreads = 1;
        serv->sv_stats = prog->pg_stats;
        if (bufsize > RPCSVC_MAXPAYLOAD)
                bufsize = RPCSVC_MAXPAYLOAD;
        serv->sv_max_payload = bufsize ? bufsize : 4096;
        serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
        serv->sv_shutdown = shutdown;
        xdrsize = 0;
        while (prog) {
                prog->pg_lovers = prog->pg_nvers - 1;
                for (vers = 0; vers < prog->pg_nvers; vers++)
                        if (prog->pg_vers[vers]) {
                                prog->pg_hivers = vers;
                                if (prog->pg_lovers > vers)
                                        prog->pg_lovers = vers;
                                if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
                                        xdrsize = prog->pg_vers[vers]->vs_xdrsize;
                        }
                prog = prog->pg_next;
        }
        serv->sv_xdrsize = xdrsize;
        INIT_LIST_HEAD(&serv->sv_tempsocks);
        INIT_LIST_HEAD(&serv->sv_permsocks);
        init_timer(&serv->sv_temptimer);
        spin_lock_init(&serv->sv_lock);

        serv->sv_nrpools = npools;
        serv->sv_pools =
                kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
                        GFP_KERNEL);
        if (!serv->sv_pools) {
                kfree(serv);
                return NULL;
        }

        for (i = 0; i < serv->sv_nrpools; i++) {
                struct svc_pool *pool = &serv->sv_pools[i];

                dprintk("svc: initialising pool %u for %s\n",
                        i, serv->sv_name);

                pool->sp_id = i;
                INIT_LIST_HEAD(&pool->sp_threads);
                INIT_LIST_HEAD(&pool->sp_sockets);
                INIT_LIST_HEAD(&pool->sp_all_threads);
                spin_lock_init(&pool->sp_lock);
        }

        /* Remove any stale portmap registrations */
        svc_register(serv, 0, 0);

        return serv;
}

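/*
 * Create an unpooled service: a single global pool, and no thread
 * function, so the caller manages its own service threads.
 */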
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
           void (*shutdown)(struct svc_serv *serv))
{
        return __svc_create(prog, bufsize, /*npools*/ 1, shutdown);
}
EXPORT_SYMBOL(svc_create);

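/*
 * Create a pooled service, with one pool per cpu or per node as
 * dictated by the pool map.  'func' is the function the service
 * threads will run and 'sig' the signal used to kill them; setting
 * sv_function is also what marks the serv as pooled.
 */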
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
                  void (*shutdown)(struct svc_serv *serv),
                  svc_thread_fn func, int sig, struct module *mod)
{
        struct svc_serv *serv;
        unsigned int npools = svc_pool_map_get();

        serv = __svc_create(prog, bufsize, npools, shutdown);

        if (serv != NULL) {
                serv->sv_function = func;
                serv->sv_kill_signal = sig;
                serv->sv_module = mod;
        }

        return serv;
}
EXPORT_SYMBOL(svc_create_pooled);

/*
 * Destroy an RPC service.  Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
        dprintk("svc: svc_destroy(%s, %d)\n",
                serv->sv_program->pg_name,
                serv->sv_nrthreads);

        if (serv->sv_nrthreads) {
                if (--(serv->sv_nrthreads) != 0) {
                        svc_sock_update_bufs(serv);
                        return;
                }
        } else
                printk("svc_destroy: no threads for serv=%p!\n", serv);

        del_timer_sync(&serv->sv_temptimer);

        svc_close_all(&serv->sv_tempsocks);

        if (serv->sv_shutdown)
                serv->sv_shutdown(serv);

        svc_close_all(&serv->sv_permsocks);

        BUG_ON(!list_empty(&serv->sv_permsocks));
        BUG_ON(!list_empty(&serv->sv_tempsocks));

        cache_clean_deferred(serv);

        if (svc_serv_is_pooled(serv))
                svc_pool_map_put();

        /* Unregister service with the portmapper */
        svc_register(serv, 0, 0);
        kfree(serv->sv_pools);
        kfree(serv);
}
EXPORT_SYMBOL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
        unsigned int pages, arghi;

        pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
                                       * We assume one is at most one page
                                       */
        arghi = 0;
        BUG_ON(pages > RPCSVC_MAXPAGES);
        while (pages) {
                struct page *p = alloc_page(GFP_KERNEL);
                if (!p)
                        break;
                rqstp->rq_pages[arghi++] = p;
                pages--;
        }
        return pages == 0;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
                if (rqstp->rq_pages[i])
                        put_page(rqstp->rq_pages[i]);
}

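/*
 * Allocate and initialise an svc_rqst for a new server thread in the
 * given pool: link it into the pool and allocate its XDR argument and
 * result buffers plus the page array.  On allocation failure the
 * partially built thread is torn down via svc_exit_thread() and
 * ERR_PTR(-ENOMEM) is returned.
 */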
struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
{
        struct svc_rqst *rqstp;

        rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
        if (!rqstp)
                goto out_enomem;

        init_waitqueue_head(&rqstp->rq_wait);

        serv->sv_nrthreads++;
        spin_lock_bh(&pool->sp_lock);
        pool->sp_nrthreads++;
        list_add(&rqstp->rq_all, &pool->sp_all_threads);
        spin_unlock_bh(&pool->sp_lock);
        rqstp->rq_server = serv;
        rqstp->rq_pool = pool;

        rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
        if (!rqstp->rq_argp)
                goto out_thread;

        rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
        if (!rqstp->rq_resp)
                goto out_thread;

        if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
                goto out_thread;

        return rqstp;
out_thread:
        svc_exit_thread(rqstp);
out_enomem:
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(svc_prepare_thread);

/*
 * Create a thread in the given pool.  Caller must hold BKL or another lock to
 * serialize access to the svc_serv struct.  On a NUMA or SMP machine, with a
 * multi-pool serv, the thread will be restricted to run on the cpus belonging
 * to the pool.
 */
static int
__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
                    struct svc_pool *pool)
{
        struct svc_rqst *rqstp;
        int error = -ENOMEM;
        int have_oldmask = 0;
        cpumask_t uninitialized_var(oldmask);

        rqstp = svc_prepare_thread(serv, pool);
        if (IS_ERR(rqstp)) {
                error = PTR_ERR(rqstp);
                goto out;
        }

        if (serv->sv_nrpools > 1)
                have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);

        error = kernel_thread((int (*)(void *)) func, rqstp, 0);

        if (have_oldmask)
                set_cpus_allowed(current, oldmask);

        if (error < 0)
                goto out_thread;
        svc_sock_update_bufs(serv);
        error = 0;
out:
        return error;

out_thread:
        svc_exit_thread(rqstp);
        goto out;
}

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
        if (pool != NULL)
                return pool;

        return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
        unsigned int i;
        struct task_struct *task = NULL;

        if (pool != NULL) {
                spin_lock_bh(&pool->sp_lock);
        } else {
                /* choose a pool in round-robin fashion */
                for (i = 0; i < serv->sv_nrpools; i++) {
                        pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
                        spin_lock_bh(&pool->sp_lock);
                        if (!list_empty(&pool->sp_all_threads))
                                goto found_pool;
                        spin_unlock_bh(&pool->sp_lock);
                }
                return NULL;
        }

found_pool:
        if (!list_empty(&pool->sp_all_threads)) {
                struct svc_rqst *rqstp;

                /*
                 * Remove from the pool->sp_all_threads list
                 * so we don't try to kill it again.
                 */
                rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
                list_del_init(&rqstp->rq_all);
                task = rqstp->rq_task;
        }
        spin_unlock_bh(&pool->sp_lock);

        return task;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL or another lock to protect access to svc_serv fields.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
        struct task_struct *victim;
        int error = 0;
        unsigned int state = serv->sv_nrthreads - 1;

        if (pool == NULL) {
                /* The -1 assumes caller has done a svc_get() */
                nrservs -= (serv->sv_nrthreads - 1);
        } else {
                spin_lock_bh(&pool->sp_lock);
                nrservs -= pool->sp_nrthreads;
                spin_unlock_bh(&pool->sp_lock);
        }

        /* create new threads */
        while (nrservs > 0) {
                nrservs--;
                __module_get(serv->sv_module);
                error = __svc_create_thread(serv->sv_function, serv,
                                            choose_pool(serv, pool, &state));
                if (error < 0) {
                        module_put(serv->sv_module);
                        break;
                }
        }
        /* destroy old threads */
        while (nrservs < 0 &&
               (victim = choose_victim(serv, pool, &state)) != NULL) {
                send_sig(serv->sv_kill_signal, victim, 1);
                nrservs++;
        }

        return error;
}
EXPORT_SYMBOL(svc_set_num_threads);

/*
 * Called from a server thread as it's exiting.  Caller must hold the BKL or
 * the "service mutex", whichever is appropriate for the service.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
        struct svc_serv *serv = rqstp->rq_server;
        struct svc_pool *pool = rqstp->rq_pool;

        svc_release_buffer(rqstp);
        kfree(rqstp->rq_resp);
        kfree(rqstp->rq_argp);
        kfree(rqstp->rq_auth_data);

        spin_lock_bh(&pool->sp_lock);
        pool->sp_nrthreads--;
        list_del(&rqstp->rq_all);
        spin_unlock_bh(&pool->sp_lock);

        kfree(rqstp);

        /* Release the server */
        if (serv)
                svc_destroy(serv);
}
EXPORT_SYMBOL(svc_exit_thread);

/*
 * Register an RPC service with the local portmapper.
 * To unregister a service, call this routine with
 * proto and port == 0.
 */
int
svc_register(struct svc_serv *serv, int proto, unsigned short port)
{
        struct svc_program *progp;
        unsigned long flags;
        unsigned int i;
        int error = 0, dummy;

        if (!port)
                clear_thread_flag(TIF_SIGPENDING);

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
                        if (progp->pg_vers[i] == NULL)
                                continue;

                        dprintk("svc: svc_register(%s, %s, %d, %d)%s\n",
                                progp->pg_name,
                                proto == IPPROTO_UDP ? "udp" : "tcp",
                                port,
                                i,
                                progp->pg_vers[i]->vs_hidden ?
                                        " (but not telling portmap)" : "");

                        if (progp->pg_vers[i]->vs_hidden)
                                continue;

                        error = rpcb_register(progp->pg_prog, i, proto, port, &dummy);
                        if (error < 0)
                                break;
                        if (port && !dummy) {
                                error = -EACCES;
                                break;
                        }
                }
        }

        if (!port) {
                spin_lock_irqsave(&current->sighand->siglock, flags);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }

        return error;
}

/*
 * Printk the given error with the address of the client that caused it.
 */
static int
__attribute__ ((format (printf, 2, 3)))
svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
        va_list args;
        int r;
        char buf[RPC_MAX_ADDRBUFLEN];

        if (!net_ratelimit())
                return 0;

        printk(KERN_WARNING "svc: %s: ",
               svc_print_addr(rqstp, buf, sizeof(buf)));

        va_start(args, fmt);
        r = vprintk(fmt, args);
        va_end(args);

        return r;
}

/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
        struct svc_program *progp;
        struct svc_version *versp = NULL;       /* compiler food */
        struct svc_procedure *procp = NULL;
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec *resv = &rqstp->rq_res.head[0];
        struct svc_serv *serv = rqstp->rq_server;
        kxdrproc_t xdr;
        __be32 *statp;
        u32 dir, prog, vers, proc;
        __be32 auth_stat, rpc_stat;
        int auth_res;
        __be32 *reply_statp;

        rpc_stat = rpc_success;

        if (argv->iov_len < 6*4)
                goto err_short_len;

        /* setup response xdr_buf.
         * Initially it has just one page
         */
        rqstp->rq_resused = 1;
        resv->iov_base = page_address(rqstp->rq_respages[0]);
        resv->iov_len = 0;
        rqstp->rq_res.pages = rqstp->rq_respages + 1;
        rqstp->rq_res.len = 0;
        rqstp->rq_res.page_base = 0;
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.buflen = PAGE_SIZE;
        rqstp->rq_res.tail[0].iov_base = NULL;
        rqstp->rq_res.tail[0].iov_len = 0;
        /* Will be turned off only in gss privacy case: */
        rqstp->rq_splice_ok = 1;

        /* Setup reply header */
        rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
        rqstp->rq_xid = svc_getu32(argv);
        svc_putu32(resv, rqstp->rq_xid);

        dir = svc_getnl(argv);
        vers = svc_getnl(argv);

        /* First words of reply: */
        svc_putnl(resv, 1);             /* REPLY */

        if (dir != 0)                   /* direction != CALL */
                goto err_bad_dir;
        if (vers != 2)                  /* RPC version number */
                goto err_bad_rpc;

        /* Save position in case we later decide to reject: */
        reply_statp = resv->iov_base + resv->iov_len;

        svc_putnl(resv, 0);             /* ACCEPT */

        rqstp->rq_prog = prog = svc_getnl(argv);        /* program number */
        rqstp->rq_vers = vers = svc_getnl(argv);        /* version number */
        rqstp->rq_proc = proc = svc_getnl(argv);        /* procedure number */

        for (progp = serv->sv_program; progp; progp = progp->pg_next)
                if (prog == progp->pg_prog)
                        break;

        /*
         * Decode auth data, and add verifier to reply buffer.
         * We do this before anything else in order to get a decent
         * auth verifier.
         */
        auth_res = svc_authenticate(rqstp, &auth_stat);
        /* Also give the program a chance to reject this call: */
        if (auth_res == SVC_OK && progp) {
                auth_stat = rpc_autherr_badcred;
                auth_res = progp->pg_authenticate(rqstp);
        }
        switch (auth_res) {
        case SVC_OK:
                break;
        case SVC_GARBAGE:
                goto err_garbage;
        case SVC_SYSERR:
                rpc_stat = rpc_system_err;
                goto err_bad;
        case SVC_DENIED:
                goto err_bad_auth;
        case SVC_DROP:
                goto dropit;
        case SVC_COMPLETE:
                goto sendit;
        }

        if (progp == NULL)
                goto err_bad_prog;

        if (vers >= progp->pg_nvers ||
            !(versp = progp->pg_vers[vers]))
                goto err_bad_vers;

        procp = versp->vs_proc + proc;
        if (proc >= versp->vs_nproc || !procp->pc_func)
                goto err_bad_proc;
        rqstp->rq_server = serv;
        rqstp->rq_procinfo = procp;

        /* Syntactic check complete */
        serv->sv_stats->rpccnt++;

        /* Build the reply header. */
        statp = resv->iov_base + resv->iov_len;
        svc_putnl(resv, RPC_SUCCESS);

        /* Bump per-procedure stats counter */
        procp->pc_count++;

        /* Initialize storage for argp and resp */
        memset(rqstp->rq_argp, 0, procp->pc_argsize);
        memset(rqstp->rq_resp, 0, procp->pc_ressize);
        /* un-reserve some of the out-queue now that we have a
         * better idea of reply size
         */
        if (procp->pc_xdrressize)
                svc_reserve_auth(rqstp, procp->pc_xdrressize << 2);

        /* Call the function that processes the request. */
        if (!versp->vs_dispatch) {
                /* Decode arguments */
                xdr = procp->pc_decode;
                if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
                        goto err_garbage;

                *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

                /* Encode reply */
                if (*statp == rpc_drop_reply) {
                        if (procp->pc_release)
                                procp->pc_release(rqstp, NULL, rqstp->rq_resp);
                        goto dropit;
                }
                if (*statp == rpc_success && (xdr = procp->pc_encode)
                    && !xdr(rqstp, resv->iov_base + resv->iov_len, rqstp->rq_resp)) {
                        dprintk("svc: failed to encode reply\n");
                        /* serv->sv_stats->rpcsystemerr++; */
                        *statp = rpc_system_err;
                }
        } else {
                dprintk("svc: calling dispatcher\n");
                if (!versp->vs_dispatch(rqstp, statp)) {
                        /* Release reply info */
                        if (procp->pc_release)
                                procp->pc_release(rqstp, NULL, rqstp->rq_resp);
                        goto dropit;
                }
        }

        /* Check RPC status result */
        if (*statp != rpc_success)
                resv->iov_len = ((void *)statp) - resv->iov_base + 4;

        /* Release reply info */
        if (procp->pc_release)
                procp->pc_release(rqstp, NULL, rqstp->rq_resp);

        if (procp->pc_encode == NULL)
                goto dropit;

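        /*
         * Common exit paths: sendit transmits the reply we've built,
         * dropit discards the request without replying.
         */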
sendit:
        if (svc_authorise(rqstp))
                goto dropit;
        return svc_send(rqstp);

dropit:
        svc_authorise(rqstp);   /* doesn't hurt to call this twice */
        dprintk("svc: svc_process dropit\n");
        svc_drop(rqstp);
        return 0;

err_short_len:
        svc_printk(rqstp, "short len %Zd, dropping request\n",
                   argv->iov_len);

        goto dropit;                    /* drop request */

err_bad_dir:
        svc_printk(rqstp, "bad direction %d, dropping request\n", dir);

        serv->sv_stats->rpcbadfmt++;
        goto dropit;                    /* drop request */

err_bad_rpc:
        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, 1);     /* REJECT */
        svc_putnl(resv, 0);     /* RPC_MISMATCH */
        svc_putnl(resv, 2);     /* Only RPCv2 supported, so both the */
        svc_putnl(resv, 2);     /* lowest and highest version are 2 */
        goto sendit;

err_bad_auth:
        dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
        serv->sv_stats->rpcbadauth++;
        /* Restore write pointer to location of accept status: */
        xdr_ressize_check(rqstp, reply_statp);
        svc_putnl(resv, 1);     /* REJECT */
        svc_putnl(resv, 1);     /* AUTH_ERROR */
        svc_putnl(resv, ntohl(auth_stat));      /* status */
        goto sendit;

err_bad_prog:
        dprintk("svc: unknown program %d\n", prog);
        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, RPC_PROG_UNAVAIL);
        goto sendit;

err_bad_vers:
        svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
                   vers, prog, progp->pg_name);

        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, RPC_PROG_MISMATCH);
        svc_putnl(resv, progp->pg_lovers);
        svc_putnl(resv, progp->pg_hivers);
        goto sendit;

err_bad_proc:
        svc_printk(rqstp, "unknown procedure (%d)\n", proc);

        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, RPC_PROC_UNAVAIL);
        goto sendit;

err_garbage:
        svc_printk(rqstp, "failed to decode args\n");

        rpc_stat = rpc_garbage_args;
err_bad:
        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, ntohl(rpc_stat));
        goto sendit;
}
EXPORT_SYMBOL(svc_process);

/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
        u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

        if (rqstp->rq_server->sv_max_payload < max)
                max = rqstp->rq_server->sv_max_payload;
        return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);