/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/wait.h>
#include <linux/mm.h>

/*
 * This is the RPC server thread function prototype
 */
typedef void		(*svc_thread_fn)(struct svc_rqst *);

/*
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets.  Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node.  This optimisation reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
	unsigned int		sp_id;		/* pool id; also node id on NUMA */
	spinlock_t		sp_lock;	/* protects all fields */
	struct list_head	sp_threads;	/* idle server threads */
	struct list_head	sp_sockets;	/* pending sockets */
	unsigned int		sp_nrthreads;	/* # of threads in pool */
	struct list_head	sp_all_threads;	/* all server threads */
} ____cacheline_aligned_in_smp;

/*
 * RPC service.
 *
 * An RPC service is a ``daemon,'' possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
	struct svc_program *	sv_program;	/* RPC program */
	struct svc_stat *	sv_stats;	/* RPC statistics */
	spinlock_t		sv_lock;
	unsigned int		sv_nrthreads;	/* # of server threads */
	unsigned int		sv_max_payload;	/* datagram payload size */
	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
	unsigned int		sv_xdrsize;	/* XDR buffer size */

	struct list_head	sv_permsocks;	/* all permanent sockets */
	struct list_head	sv_tempsocks;	/* all temporary sockets */
	int			sv_tmpcnt;	/* count of temporary sockets */
	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */

	char *			sv_name;	/* service name */

	unsigned int		sv_nrpools;	/* number of thread pools */
	struct svc_pool *	sv_pools;	/* array of thread pools */

	void			(*sv_shutdown)(struct svc_serv *serv);
						/* Callback to use when last thread
						 * exits.
						 */

	struct module *		sv_module;	/* optional module to count when
						 * adding threads */
	svc_thread_fn		sv_function;	/* main function for threads */
	int			sv_kill_signal;	/* signal to kill threads */
};

/*
 * We use sv_nrthreads as a reference count.  svc_destroy() drops
 * this refcount, so we need to bump it up around operations that
 * change the number of threads.  Horrible, but there it is.
 * Should be called with the BKL held.
 */
static inline void svc_get(struct svc_serv *serv)
{
	serv->sv_nrthreads++;
}
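
/*
 * Usage sketch (illustrative only, names are placeholders): take a
 * reference across an operation that may change the thread count, and
 * let svc_destroy() drop it again afterwards:
 *
 *	svc_get(serv);
 *	error = svc_create_thread(func, serv);
 *	svc_destroy(serv);
 */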

/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_CACHE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K.  The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here.  This is the same as the historical
 * Linux limit; someone who cares more about NFS/UDP performance
 * can test a larger number.
 *
 * For TCP transports we have more freedom.  A size of 1MB is
 * chosen to match the client limit.  Other OSes are known to
 * have larger limits, but those numbers are probably beyond
 * the point of diminishing returns.
 */
#define RPCSVC_MAXPAYLOAD	(1*1024*1024u)
#define RPCSVC_MAXPAYLOAD_TCP	RPCSVC_MAXPAYLOAD
#define RPCSVC_MAXPAYLOAD_UDP	(32*1024u)

extern u32 svc_max_payload(const struct svc_rqst *rqstp);
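
/*
 * Illustrative use (sketch, not part of this header's contract):
 * callers can size reply data from the per-request limit reported by
 * svc_max_payload() rather than the compile-time maximum, e.g.
 *
 *	count = min_t(u32, requested_count, svc_max_payload(rqstp));
 *
 * where requested_count is a hypothetical caller-supplied length.
 */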

/*
 * RPC Requests and replies are stored in one or more pages.
 * We maintain an array of pages for each server thread.
 * Requests are copied into these pages as they arrive.  Remaining
 * pages are available to write the reply into.
 *
 * Pages are sent using ->sendpage so each server thread needs to
 * allocate more to replace those used in sending.  To help keep track
 * of these pages we have a receive list where all pages initially live,
 * and a send list where pages are moved to when they are to be part
 * of a reply.
 *
 * We use xdr_buf for holding responses as it fits well with NFS
 * read responses (that have a header, and some data pages, and possibly
 * a tail) and means we can share some client side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the rq_*pages
 * list.  The xdr_buf.pages pointer points to the second page on that
 * list.  xdr_buf.tail points to the end of the first page.
 * This assumes that the non-page part of an rpc reply will fit
 * in a page - NFSd ensures this.  lockd also has no trouble.
 *
 * Each request/reply pair can have at most one "payload", plus two pages,
 * one for the request, and one for the reply.
 * When we use ->sendfile to return read data, we might need one extra page
 * if the request is not page-aligned.  So add another '1'.
 */
#define RPCSVC_MAXPAGES		((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
				+ 2 + 1)
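
/*
 * Worked example (assuming 4K pages): RPCSVC_MAXPAYLOAD is 1MB, so the
 * payload itself needs 1MB / 4KB = 256 pages; adding the two pages for
 * request and reply headers plus the extra page for unaligned ->sendfile
 * data gives RPCSVC_MAXPAGES = 259 pages per server thread.
 */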

/*
 * These helpers treat a kvec as a cursor over 32-bit XDR words: the
 * "get" functions consume a word from the front of the iovec and the
 * "put" functions append a word at its current end, adjusting iov_base
 * and iov_len as they go.
 */

/* Consume the next 32-bit word and return it in host byte order. */
static inline u32 svc_getnl(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return ntohl(val);
}

/* Append a host-order value as a network-order word. */
static inline void svc_putnl(struct kvec *iov, u32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = htonl(val);
	iov->iov_len += sizeof(__be32);
}

/* Consume the next word, leaving it in network byte order. */
static inline __be32 svc_getu32(struct kvec *iov)
{
	__be32 val, *vp;
	vp = iov->iov_base;
	val = *vp++;
	iov->iov_base = (void*)vp;
	iov->iov_len -= sizeof(__be32);
	return val;
}

/* Undo the most recent svc_getu32()/svc_getnl(). */
static inline void svc_ungetu32(struct kvec *iov)
{
	__be32 *vp = (__be32 *)iov->iov_base;
	iov->iov_base = (void *)(vp - 1);
	iov->iov_len += sizeof(*vp);
}

/* Append a word that is already in network byte order. */
static inline void svc_putu32(struct kvec *iov, __be32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;
	*vp = val;
	iov->iov_len += sizeof(__be32);
}
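
/*
 * Example (illustrative only; "status" is a placeholder): pull one
 * argument word off the request head and append a status word to the
 * reply head:
 *
 *	u32 arg = svc_getnl(&rqstp->rq_arg.head[0]);
 *	svc_putnl(&rqstp->rq_res.head[0], status);
 */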

union svc_addr_u {
	struct in_addr		addr;
	struct in6_addr		addr6;
};

/*
 * The context of a single thread, including the request currently being
 * processed.
 */
struct svc_rqst {
	struct list_head	rq_list;	/* idle list */
	struct list_head	rq_all;		/* all threads list */
	struct svc_sock *	rq_sock;	/* socket */
	struct sockaddr_storage	rq_addr;	/* peer address */
	size_t			rq_addrlen;

	struct svc_serv *	rq_server;	/* RPC service definition */
	struct svc_pool *	rq_pool;	/* thread pool */
	struct svc_procedure *	rq_procinfo;	/* procedure info */
	struct auth_ops *	rq_authop;	/* authentication flavour */
	struct svc_cred		rq_cred;	/* auth info */
	struct sk_buff *	rq_skbuff;	/* fast recv inet buffer */
	struct svc_deferred_req	*rq_deferred;	/* deferred request we are replaying */

	struct xdr_buf		rq_arg;
	struct xdr_buf		rq_res;
	struct page *		rq_pages[RPCSVC_MAXPAGES];
	struct page *		*rq_respages;	/* points into rq_pages */
	int			rq_resused;	/* number of pages used for result */

	struct kvec		rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */

	__be32			rq_xid;		/* transmission id */
	u32			rq_prog;	/* program number */
	u32			rq_vers;	/* program version */
	u32			rq_proc;	/* procedure number */
	u32			rq_prot;	/* IP protocol */
	unsigned short		rq_secure : 1;	/* secure port */

	union svc_addr_u	rq_daddr;	/* dest addr of request
						 * - reply from here */

	void *			rq_argp;	/* decoded arguments */
	void *			rq_resp;	/* xdr'd results */
	void *			rq_auth_data;	/* flavor-specific data */

	int			rq_reserved;	/* space on socket outq
						 * reserved for this request
						 */

	struct cache_req	rq_chandle;	/* handle passed to caches for
						 * request delaying
						 */
	/* Catering to nfsd */
	struct auth_domain *	rq_client;	/* RPC peer info */
	struct svc_cacherep *	rq_cacherep;	/* cache info */
	struct knfsd_fh *	rq_reffh;	/* Reference filehandle, used to
						 * determine what device number
						 * to report (real or virtual)
						 */
	int			rq_splice_ok;	/* turned off in gss privacy
						 * to prevent encrypting page
						 * cache pages */
	wait_queue_head_t	rq_wait;	/* synchronization */
	struct task_struct	*rq_task;	/* service thread */
};

/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_addr;
}
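
/*
 * Sketch (illustrative; "port" is a placeholder): callers normally check
 * the stored address family before picking the typed accessor, e.g.
 *
 *	if (svc_addr(rqstp)->sa_family == AF_INET)
 *		port = ntohs(svc_addr_in(rqstp)->sin_port);
 */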

/*
 * Check buffer bounds after decoding arguments
 */
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
	char *cp = (char *)p;
	struct kvec *vec = &rqstp->rq_arg.head[0];
	return cp >= (char*)vec->iov_base
		&& cp <= (char*)vec->iov_base + vec->iov_len;
}

static inline int
xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
{
	struct kvec *vec = &rqstp->rq_res.head[0];
	char *cp = (char*)p;

	vec->iov_len = cp - (char*)vec->iov_base;

	return vec->iov_len <= PAGE_SIZE;
}
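
/*
 * Typical pattern (sketch): an XDR decode routine ends with
 * xdr_argsize_check(rqstp, p) once p points just past the last decoded
 * word, and an encode routine ends with xdr_ressize_check(rqstp, p),
 * which also sets the reply head's iov_len from the final position of p.
 */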

/* Release all pages currently held for the result and clear the slots. */
static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
	while (rqstp->rq_resused) {
		struct page **pp = (rqstp->rq_respages +
				    --rqstp->rq_resused);
		if (*pp) {
			put_page(*pp);
			*pp = NULL;
		}
	}
}

struct svc_deferred_req {
	u32			prot;	/* protocol (UDP or TCP) */
	struct svc_sock		*svsk;
	struct sockaddr_storage	addr;	/* where reply must go */
	size_t			addrlen;
	union svc_addr_u	daddr;	/* where reply must come from */
	struct cache_deferred_req handle;
	int			argslen;
	__be32			args[0];
};

/*
 * List of RPC programs on the same transport endpoint
 */
struct svc_program {
	struct svc_program *	pg_next;	/* other programs (same xprt) */
	u32			pg_prog;	/* program number */
	unsigned int		pg_lovers;	/* lowest version */
	unsigned int		pg_hivers;	/* highest version */
	unsigned int		pg_nvers;	/* number of versions */
	struct svc_version **	pg_vers;	/* version array */
	char *			pg_name;	/* service name */
	char *			pg_class;	/* class name: services sharing authentication */
	struct svc_stat *	pg_stats;	/* rpc statistics */
	int			(*pg_authenticate)(struct svc_rqst *);
};

/*
 * RPC program version
 */
struct svc_version {
	u32			vs_vers;	/* version number */
	u32			vs_nproc;	/* number of procedures */
	struct svc_procedure *	vs_proc;	/* per-procedure info */
	u32			vs_xdrsize;	/* xdrsize needed for this version */

	unsigned int		vs_hidden : 1;	/* Don't register with portmapper.
						 * Only used for nfsacl so far. */

	/* Override dispatch function (e.g. when caching replies).
	 * A return value of 0 means drop the request.
	 * vs_dispatch == NULL means use default dispatcher.
	 */
	int			(*vs_dispatch)(struct svc_rqst *, __be32 *);
};

/*
 * RPC procedure info
 */
typedef __be32	(*svc_procfunc)(struct svc_rqst *, void *argp, void *resp);
struct svc_procedure {
	svc_procfunc		pc_func;	/* process the request */
	kxdrproc_t		pc_decode;	/* XDR decode args */
	kxdrproc_t		pc_encode;	/* XDR encode result */
	kxdrproc_t		pc_release;	/* XDR free result */
	unsigned int		pc_argsize;	/* argument struct size */
	unsigned int		pc_ressize;	/* result struct size */
	unsigned int		pc_count;	/* call count */
	unsigned int		pc_cachetype;	/* cache info (NFS) */
	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
};
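
/*
 * Illustrative entry (sketch only; all "example_*" names are hypothetical):
 * a version's vs_proc points to an array of these, indexed by procedure
 * number, e.g.
 *
 *	static struct svc_procedure example_procedures[] = {
 *		[0] = {
 *			.pc_func    = (svc_procfunc) example_null,
 *			.pc_decode  = (kxdrproc_t) example_decode_void,
 *			.pc_encode  = (kxdrproc_t) example_encode_void,
 *			.pc_argsize = sizeof(struct example_void_args),
 *			.pc_ressize = sizeof(struct example_void_res),
 *		},
 *	};
 */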

/*
 * Function prototypes.
 */
struct svc_serv *  svc_create(struct svc_program *, unsigned int,
			      void (*shutdown)(struct svc_serv*));
int		   svc_create_thread(svc_thread_fn, struct svc_serv *);
void		   svc_exit_thread(struct svc_rqst *);
struct svc_serv *  svc_create_pooled(struct svc_program *, unsigned int,
			      void (*shutdown)(struct svc_serv*),
			      svc_thread_fn, int sig, struct module *);
int		   svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
void		   svc_destroy(struct svc_serv *);
int		   svc_process(struct svc_rqst *);
int		   svc_register(struct svc_serv *, int, unsigned short);
void		   svc_wake_up(struct svc_serv *);
void		   svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool *  svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char *		   svc_print_addr(struct svc_rqst *, char *, size_t);

#define	RPC_MAX_ADDRBUFLEN	(63U)

/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the packet.  This is difficult to determine exactly
 * for all cases without actually generating the checksum, so we just use a
 * static value.
 */
static inline void
svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
	int added_space = 0;

	switch (rqstp->rq_authop->flavour) {
	case RPC_AUTH_GSS:
		added_space = RPC_MAX_AUTH_SIZE;
	}
	svc_reserve(rqstp, space + added_space);
}

#endif /* SUNRPC_SVC_H */