/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
/* SMP locking strategy:
 *
 *      svc_serv->sv_lock protects most stuff for that service.
 *      svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list.
 *      svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued more
 *      than once.
 *
 *      Some flags can be set to certain values at any time
 *      providing that certain rules are followed:
 *
 *      SK_CONN, SK_DATA can be set or cleared at any time.
 *              After a set, svc_sock_enqueue must be called.
 *              After a clear, the socket must be read/accepted;
 *              if this succeeds, it must be set again.
 *      SK_CLOSE can be set at any time. It is never cleared.
 *
 */
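
/* A sketch of the rules above as they appear at the call sites in this
 * file (not new behaviour, just the canonical pattern):
 *
 *      set_bit(SK_DATA, &svsk->sk_flags);      producer: data has arrived
 *      svc_sock_enqueue(svsk);                 ...then always enqueue
 *
 *      clear_bit(SK_DATA, &svsk->sk_flags);    consumer: about to read
 *      ...read...                              if more data may remain,
 *                                              set SK_DATA again before
 *                                              re-enqueueing (see
 *                                              svc_udp_recvfrom and
 *                                              svc_tcp_recvfrom).
 */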
#define RPCDBG_FACILITY RPCDBG_SVCSOCK


static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
                                         int *errp, int pmap_reg);
static void svc_udp_data_ready(struct sock *, int);
static int svc_udp_recvfrom(struct svc_rqst *);
static int svc_udp_sendto(struct svc_rqst *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;
/*
 * Queue up an idle server thread.  Must have serv->sv_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_serv_enqueue(struct svc_serv *serv, struct svc_rqst *rqstp)
{
        list_add(&rqstp->rq_list, &serv->sv_threads);
}

/*
 * Dequeue an nfsd thread.  Must have serv->sv_lock held.
 */
static inline void
svc_serv_dequeue(struct svc_serv *serv, struct svc_rqst *rqstp)
{
        list_del(&rqstp->rq_list);
}
/*
 * Release an skbuff after use
 */
static inline void
svc_release_skb(struct svc_rqst *rqstp)
{
        struct sk_buff *skb = rqstp->rq_skbuff;
        struct svc_deferred_req *dr = rqstp->rq_deferred;

        if (skb) {
                rqstp->rq_skbuff = NULL;

                dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
                skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
        }
        if (dr) {
                rqstp->rq_deferred = NULL;
                kfree(dr);
        }
}
/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
        int wspace;

        if (svsk->sk_sock->type == SOCK_STREAM)
                wspace = sk_stream_wspace(svsk->sk_sk);
        else
                wspace = sock_wspace(svsk->sk_sk);

        return wspace;
}
/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
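/* Note on the space check below (a reading of the code, not new policy):
 * a socket is skipped unless roughly twice the worst-case reply space
 * (its current sk_reserved plus one more sv_bufsz-sized reply) fits in
 * the send buffer.  Sockets that are closing, or that have a pending
 * connection to accept, bypass the check since they must always be
 * processed.
 */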
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
        struct svc_serv *serv = svsk->sk_server;
        struct svc_rqst *rqstp;

        if (!(svsk->sk_flags &
              ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
                return;
        if (test_bit(SK_DEAD, &svsk->sk_flags))
                return;

        spin_lock_bh(&serv->sv_lock);

        if (!list_empty(&serv->sv_threads) &&
            !list_empty(&serv->sv_sockets))
                printk(KERN_ERR
                        "svc_sock_enqueue: threads and sockets both waiting??\n");

        if (test_bit(SK_DEAD, &svsk->sk_flags)) {
                /* Don't enqueue dead sockets */
                dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
                goto out_unlock;
        }

        /* Mark socket as busy. It will remain in this state until the
         * server has processed all pending data and put the socket back
         * on the idle list.  We update SK_BUSY atomically because
         * it also guards against trying to enqueue the svc_sock twice.
         */
        if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
                /* Don't enqueue socket while already enqueued */
                dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
                goto out_unlock;
        }

        set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
        if (((atomic_read(&svsk->sk_reserved) + serv->sv_bufsz)*2
             > svc_sock_wspace(svsk))
            && !test_bit(SK_CLOSE, &svsk->sk_flags)
            && !test_bit(SK_CONN, &svsk->sk_flags)) {
                /* Don't enqueue while not enough space for reply */
                dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
                        svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_bufsz,
                        svc_sock_wspace(svsk));
                clear_bit(SK_BUSY, &svsk->sk_flags);
                goto out_unlock;
        }
        clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);

        if (!list_empty(&serv->sv_threads)) {
                rqstp = list_entry(serv->sv_threads.next,
                                   struct svc_rqst,
                                   rq_list);
                dprintk("svc: socket %p served by daemon %p\n",
                        svsk->sk_sk, rqstp);
                svc_serv_dequeue(serv, rqstp);
                if (rqstp->rq_sock)
                        printk(KERN_ERR
                                "svc_sock_enqueue: server %p, rq_sock=%p!\n",
                                rqstp, rqstp->rq_sock);
                rqstp->rq_sock = svsk;
                atomic_inc(&svsk->sk_inuse);
                rqstp->rq_reserved = serv->sv_bufsz;
                atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
                wake_up(&rqstp->rq_wait);
        } else {
                dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
                list_add_tail(&svsk->sk_ready, &serv->sv_sockets);
        }

out_unlock:
        spin_unlock_bh(&serv->sv_lock);
}
/*
 * Dequeue the first socket.  Must be called with the serv->sv_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_serv *serv)
{
        struct svc_sock *svsk;

        if (list_empty(&serv->sv_sockets))
                return NULL;

        svsk = list_entry(serv->sv_sockets.next,
                          struct svc_sock, sk_ready);
        list_del_init(&svsk->sk_ready);

        dprintk("svc: socket %p dequeued, inuse=%d\n",
                svsk->sk_sk, atomic_read(&svsk->sk_inuse));

        return svsk;
}
/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
        clear_bit(SK_BUSY, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
}
/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
        space += rqstp->rq_res.head[0].iov_len;

        if (space < rqstp->rq_reserved) {
                struct svc_sock *svsk = rqstp->rq_sock;
                atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
                rqstp->rq_reserved = space;

                svc_sock_enqueue(svsk);
        }
}
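
/* Typical use (a hypothetical caller, for illustration): once a service
 * routine knows its reply will be small, it can return the excess
 * reservation early, e.g.
 *
 *      svc_reserve(rqstp, 512);
 *
 * so that svc_sock_enqueue's space check stops counting the full
 * sv_bufsz worst case against this socket.
 */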
/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
        if (atomic_dec_and_test(&svsk->sk_inuse) &&
                        test_bit(SK_DEAD, &svsk->sk_flags)) {
                dprintk("svc: releasing dead socket\n");
                sock_release(svsk->sk_sock);
                kfree(svsk);
        }
}
static void
svc_sock_release(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;

        svc_release_skb(rqstp);

        svc_free_allpages(rqstp);
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.page_base = 0;

        /* Reset response buffer and release
         * the reservation.
         * But first, check that enough space was reserved
         * for the reply, otherwise we have a bug!
         */
        if ((rqstp->rq_res.len) > rqstp->rq_reserved)
                printk(KERN_ERR "RPC request reserved %d but used %d\n",
                       rqstp->rq_reserved,
                       rqstp->rq_res.len);

        rqstp->rq_res.head[0].iov_len = 0;
        svc_reserve(rqstp, 0);
        rqstp->rq_sock = NULL;

        svc_sock_put(svsk);
}
/*
 * External function to wake up a server waiting for data
 */
void
svc_wake_up(struct svc_serv *serv)
{
        struct svc_rqst *rqstp;

        spin_lock_bh(&serv->sv_lock);
        if (!list_empty(&serv->sv_threads)) {
                rqstp = list_entry(serv->sv_threads.next,
                                   struct svc_rqst,
                                   rq_list);
                dprintk("svc: daemon %p woken up.\n", rqstp);
                /*
                svc_serv_dequeue(serv, rqstp);
                rqstp->rq_sock = NULL;
                 */
                wake_up(&rqstp->rq_wait);
        }
        spin_unlock_bh(&serv->sv_lock);
}
/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct socket *sock = svsk->sk_sock;
        int slen;
        char buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
        struct cmsghdr *cmh = (struct cmsghdr *)buffer;
        struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
        int len = 0;
        int result;
        int size;
        struct page **ppage = xdr->pages;
        size_t base = xdr->page_base;
        unsigned int pglen = xdr->page_len;
        unsigned int flags = MSG_MORE;

        slen = xdr->len;

        if (rqstp->rq_prot == IPPROTO_UDP) {
                /* set the source and destination */
                struct msghdr msg;
                msg.msg_name    = &rqstp->rq_addr;
                msg.msg_namelen = sizeof(rqstp->rq_addr);
                msg.msg_iov     = NULL;
                msg.msg_iovlen  = 0;
                msg.msg_flags   = MSG_MORE;

                msg.msg_control = cmh;
                msg.msg_controllen = sizeof(buffer);
                cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
                cmh->cmsg_level = SOL_IP;
                cmh->cmsg_type = IP_PKTINFO;
                pki->ipi_ifindex = 0;
                pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;

                if (sock_sendmsg(sock, &msg, 0) < 0)
                        goto out;
        }

        /* send head */
        if (slen == xdr->head[0].iov_len)
                flags = 0;
        len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
                              xdr->head[0].iov_len, flags);
        if (len != xdr->head[0].iov_len)
                goto out;
        slen -= xdr->head[0].iov_len;
        if (slen == 0)
                goto out;

        /* send page data */
        size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
        while (pglen > 0) {
                if (slen == size)
                        flags = 0;
                result = kernel_sendpage(sock, *ppage, base, size, flags);
                if (result > 0)
                        len += result;
                if (result != size)
                        goto out;
                slen -= size;
                pglen -= size;
                size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
                base = 0;
                ppage++;
        }
        /* send tail */
        if (xdr->tail[0].iov_len) {
                result = kernel_sendpage(sock,
                                rqstp->rq_respages[rqstp->rq_restailpage],
                                ((unsigned long)xdr->tail[0].iov_base) & (PAGE_SIZE-1),
                                xdr->tail[0].iov_len, 0);

                if (result > 0)
                        len += result;
        }
out:
        dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
                rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
                xdr->len, len, rqstp->rq_addr.sin_addr.s_addr);

        return len;
}
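
/* A descriptive note, not a behaviour change: every fragment above is
 * sent with MSG_MORE except the one that completes the xdr buffer
 * (flags drops to 0 exactly when the remaining length equals the piece
 * being sent), so the transport is free to coalesce head, page data and
 * tail into fewer segments on the wire.
 */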
/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
        int len;

        switch(svsk->sk_sk->sk_family) {
        case AF_INET:
                len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
                              svsk->sk_sk->sk_protocol==IPPROTO_UDP?
                              "udp" : "tcp",
                              NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
                              inet_sk(svsk->sk_sk)->num);
                break;
        default:
                len = sprintf(buf, "*unknown-%d*\n",
                              svsk->sk_sk->sk_family);
        }
        return len;
}
int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
        struct svc_sock *svsk, *closesk = NULL;
        int len = 0;

        if (!serv)
                return 0;
        spin_lock(&serv->sv_lock);
        list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
                int onelen = one_sock_name(buf+len, svsk);
                if (toclose && strcmp(toclose, buf+len) == 0)
                        closesk = svsk;
                else
                        len += onelen;
        }
        spin_unlock(&serv->sv_lock);
        if (closesk)
                svc_delete_socket(closesk);
        return len;
}
EXPORT_SYMBOL(svc_sock_names);
/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
        struct socket *sock = svsk->sk_sock;
        int avail, err;

        err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

        return (err >= 0)? avail : err;
}
/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
        struct msghdr msg;
        struct socket *sock;
        int len, alen;

        rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
        sock = rqstp->rq_sock->sk_sock;

        msg.msg_name    = &rqstp->rq_addr;
        msg.msg_namelen = sizeof(rqstp->rq_addr);
        msg.msg_control = NULL;
        msg.msg_controllen = 0;

        msg.msg_flags   = MSG_DONTWAIT;

        len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT);

        /* sock_recvmsg doesn't fill in the name/namelen, so we must...
         * possibly we should cache this in the svc_sock structure
         * at accept time. FIXME
         */
        alen = sizeof(rqstp->rq_addr);
        kernel_getpeername(sock, (struct sockaddr *)&rqstp->rq_addr, &alen);

        dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
                rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len);

        return len;
}
/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
        mm_segment_t oldfs;
        oldfs = get_fs(); set_fs(KERNEL_DS);
        sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
                        (char*)&snd, sizeof(snd));
        sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
                        (char*)&rcv, sizeof(rcv));
#else
        /* sock_setsockopt limits use to sysctl_?mem_max,
         * which isn't acceptable.  Until that is made conditional
         * on not having CAP_SYS_RESOURCE or similar, we go direct...
         * DaveM said I could!
         */
        lock_sock(sock->sk);
        sock->sk->sk_sndbuf = snd * 2;
        sock->sk->sk_rcvbuf = rcv * 2;
        sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
        release_sock(sock->sk);
#endif
}
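
/* Side note: the manual "* 2" above mirrors what sock_setsockopt()
 * itself does when applying SO_SNDBUF/SO_RCVBUF (it doubles the
 * requested value to leave room for bookkeeping overhead); what this
 * path skips is only the rmem/wmem sysctl cap.
 */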
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        if (svsk) {
                dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
                        svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
                set_bit(SK_DATA, &svsk->sk_flags);
                svc_sock_enqueue(svsk);
        }
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
}
/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
        struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data);

        if (svsk) {
                dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
                        svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
                svc_sock_enqueue(svsk);
        }

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
                dprintk("RPC svc_write_space: someone sleeping on %p\n",
                        svsk);
                wake_up_interruptible(sk->sk_sleep);
        }
}
/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct svc_serv *serv = svsk->sk_server;
        struct sk_buff *skb;
        int err, len;

        if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
                /* udp sockets need large rcvbuf as all pending
                 * requests are still in that buffer.  sndbuf must
                 * also be large enough that there is enough space
                 * for one reply per thread.
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz);

        if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
                svc_sock_received(svsk);
                return svc_deferred_recv(rqstp);
        }

        clear_bit(SK_DATA, &svsk->sk_flags);
        while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
                if (err == -EAGAIN) {
                        svc_sock_received(svsk);
                        return err;
                }
                /* possibly an icmp error */
                dprintk("svc: recvfrom returned error %d\n", -err);
        }

        if (skb->tstamp.off_sec == 0) {
                struct timeval tv;

                tv.tv_sec = xtime.tv_sec;
                tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
                skb_set_timestamp(skb, &tv);
                /* Don't enable netstamp, sunrpc doesn't
                   need that much accuracy */
        }
        skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
        set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

        /*
         * Maybe more packets - kick another thread ASAP.
         */
        svc_sock_received(svsk);

        len = skb->len - sizeof(struct udphdr);
        rqstp->rq_arg.len = len;

        rqstp->rq_prot = IPPROTO_UDP;

        /* Get sender address */
        rqstp->rq_addr.sin_family = AF_INET;
        rqstp->rq_addr.sin_port = skb->h.uh->source;
        rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
        rqstp->rq_daddr = skb->nh.iph->daddr;

        if (skb_is_nonlinear(skb)) {
                /* we have to copy */
                local_bh_disable();
                if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
                        local_bh_enable();
                        /* checksum error */
                        skb_free_datagram(svsk->sk_sk, skb);
                        return 0;
                }
                local_bh_enable();
                skb_free_datagram(svsk->sk_sk, skb);
        } else {
                /* we can use it in-place */
                rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
                rqstp->rq_arg.head[0].iov_len = len;
                if (skb_checksum_complete(skb)) {
                        skb_free_datagram(svsk->sk_sk, skb);
                        return 0;
                }
                rqstp->rq_skbuff = skb;
        }

        rqstp->rq_arg.page_base = 0;
        if (len <= rqstp->rq_arg.head[0].iov_len) {
                rqstp->rq_arg.head[0].iov_len = len;
                rqstp->rq_arg.page_len = 0;
        } else {
                rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
                rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1) / PAGE_SIZE;
        }

        if (serv->sv_stats)
                serv->sv_stats->netudpcnt++;

        return len;
}
static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
        int error;

        error = svc_sendto(rqstp, &rqstp->rq_res);
        if (error == -ECONNREFUSED)
                /* ICMP error on earlier request. */
                error = svc_sendto(rqstp, &rqstp->rq_res);

        return error;
}
static void
svc_udp_init(struct svc_sock *svsk)
{
        svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
        svsk->sk_sk->sk_write_space = svc_write_space;
        svsk->sk_recvfrom = svc_udp_recvfrom;
        svsk->sk_sendto = svc_udp_sendto;

        /* Initial setting: the buffers must have enough space to
         * receive and respond to one request.
         * svc_udp_recvfrom will re-adjust if necessary.
         */
        svc_sock_setbufsize(svsk->sk_sock,
                            3 * svsk->sk_server->sv_bufsz,
                            3 * svsk->sk_server->sv_bufsz);

        set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
        set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        dprintk("svc: socket %p TCP (listen) state change %d\n",
                sk, sk->sk_state);

        /*
         * This callback may be called twice when a new connection
         * is established as a child socket inherits everything
         * from a parent LISTEN socket.
         * 1) data_ready method of the parent socket will be called
         *    when one of child sockets becomes ESTABLISHED.
         * 2) data_ready method of the child socket may be called
         *    when it receives data before the socket is accepted.
         * In case of 2, we should ignore it silently.
         */
        if (sk->sk_state == TCP_LISTEN) {
                if (svsk) {
                        set_bit(SK_CONN, &svsk->sk_flags);
                        svc_sock_enqueue(svsk);
                } else
                        printk("svc: socket %p: no user data\n", sk);
        }

        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_all(sk->sk_sleep);
}
/*
 * A state change on a connected socket means it's dying or dead.
 */
static void
svc_tcp_state_change(struct sock *sk)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
                sk, sk->sk_state, sk->sk_user_data);

        if (!svsk)
                printk("svc: socket %p: no user data\n", sk);
        else {
                set_bit(SK_CLOSE, &svsk->sk_flags);
                svc_sock_enqueue(svsk);
        }
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_all(sk->sk_sleep);
}
static void
svc_tcp_data_ready(struct sock *sk, int count)
{
        struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

        dprintk("svc: socket %p TCP data ready (svsk %p)\n",
                sk, sk->sk_user_data);
        if (svsk) {
                set_bit(SK_DATA, &svsk->sk_flags);
                svc_sock_enqueue(svsk);
        }
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible(sk->sk_sleep);
}
/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
        struct sockaddr_in sin;
        struct svc_serv *serv = svsk->sk_server;
        struct socket *sock = svsk->sk_sock;
        struct socket *newsock;
        struct svc_sock *newsvsk;
        int err, slen;

        dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
        if (!sock)
                return;

        clear_bit(SK_CONN, &svsk->sk_flags);
        err = kernel_accept(sock, &newsock, O_NONBLOCK);
        if (err < 0) {
                if (err == -ENOMEM)
                        printk(KERN_WARNING "%s: no more sockets!\n",
                               serv->sv_name);
                else if (err != -EAGAIN && net_ratelimit())
                        printk(KERN_WARNING "%s: accept failed (err %d)!\n",
                               serv->sv_name, -err);
                return;
        }

        set_bit(SK_CONN, &svsk->sk_flags);
        svc_sock_enqueue(svsk);

        slen = sizeof(sin);
        err = kernel_getpeername(newsock, (struct sockaddr *) &sin, &slen);
        if (err < 0) {
                if (net_ratelimit())
                        printk(KERN_WARNING "%s: peername failed (err %d)!\n",
                               serv->sv_name, -err);
                goto failed;            /* aborted connection or whatever */
        }

        /* Ideally, we would want to reject connections from unauthorized
         * hosts here, but when we get encryption, the IP of the host won't
         * tell us anything.  For now just warn about unprivileged connections.
         */
        if (ntohs(sin.sin_port) >= 1024) {
                dprintk(KERN_WARNING
                        "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
                        serv->sv_name,
                        NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
        }

        dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name,
                NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));

        /* make sure that a write doesn't block forever when
         * low on memory
         */
        newsock->sk->sk_sndtimeo = HZ*30;

        if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0)))
                goto failed;

        /* make sure that we don't have too many active connections.
         * If we have, something must be dropped.
         *
         * There's no point in trying to do random drop here for
         * DoS prevention. NFS clients do one reconnect in 15
         * seconds. An attacker can easily beat that.
         *
         * The only somewhat efficient mechanism would be to drop
         * old connections from the same IP first. But right now
         * we don't even record the client IP in svc_sock.
         */
        if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
                struct svc_sock *svsk = NULL;
                spin_lock_bh(&serv->sv_lock);
                if (!list_empty(&serv->sv_tempsocks)) {
                        if (net_ratelimit()) {
                                /* Try to help the admin */
                                printk(KERN_NOTICE "%s: too many open TCP "
                                       "sockets, consider increasing the "
                                       "number of nfsd threads\n",
                                       serv->sv_name);
                                printk(KERN_NOTICE "%s: last TCP connect from "
                                       "%u.%u.%u.%u:%d\n",
                                       serv->sv_name,
                                       NIPQUAD(sin.sin_addr.s_addr),
                                       ntohs(sin.sin_port));
                        }
                        /*
                         * Always select the oldest socket. It's not fair,
                         * but so is life
                         */
                        svsk = list_entry(serv->sv_tempsocks.prev,
                                          struct svc_sock,
                                          sk_list);
                        set_bit(SK_CLOSE, &svsk->sk_flags);
                        atomic_inc(&svsk->sk_inuse);
                }
                spin_unlock_bh(&serv->sv_lock);

                if (svsk) {
                        svc_sock_enqueue(svsk);
                        svc_sock_put(svsk);
                }
        }

        if (serv->sv_stats)
                serv->sv_stats->nettcpconn++;

        return;

failed:
        sock_release(newsock);
        return;
}
/*
 * Receive data from a TCP socket.
 */
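/* For reference: RPC-over-TCP record marking (RFC 1831) prefixes each
 * record with a 4-byte big-endian word whose top bit marks the last
 * fragment and whose low 31 bits give the fragment length.  E.g. a
 * marker of 0x800000a4 means "final fragment, 164 bytes follow"; that
 * is what the sk_reclen parsing below decodes.
 */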
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk = rqstp->rq_sock;
        struct svc_serv *serv = svsk->sk_server;
        int len;
        struct kvec vec[RPCSVC_MAXPAGES];
        int pnum, vlen;

        dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
                svsk, test_bit(SK_DATA, &svsk->sk_flags),
                test_bit(SK_CONN, &svsk->sk_flags),
                test_bit(SK_CLOSE, &svsk->sk_flags));

        if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
                svc_sock_received(svsk);
                return svc_deferred_recv(rqstp);
        }

        if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
                svc_delete_socket(svsk);
                return 0;
        }

        if (test_bit(SK_CONN, &svsk->sk_flags)) {
                svc_tcp_accept(svsk);
                svc_sock_received(svsk);
                return 0;
        }

        if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
                /* sndbuf needs to have room for one request
                 * per thread, otherwise we can stall even when the
                 * network isn't a bottleneck.
                 * rcvbuf just needs to be able to hold a few requests.
                 * Normally they will be removed from the queue
                 * as soon as a complete request arrives.
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    (serv->sv_nrthreads+3) * serv->sv_bufsz,
                                    3 * serv->sv_bufsz);

        clear_bit(SK_DATA, &svsk->sk_flags);

        /* Receive data. If we haven't got the record length yet, get
         * the next four bytes. Otherwise try to gobble up as much as
         * possible up to the complete record length.
         */
        if (svsk->sk_tcplen < 4) {
                unsigned long want = 4 - svsk->sk_tcplen;
                struct kvec iov;

                iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
                iov.iov_len  = want;
                if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
                        goto error;
                svsk->sk_tcplen += len;

                if (len < want) {
                        dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
                                len, want);
                        svc_sock_received(svsk);
                        return -EAGAIN; /* record header not complete */
                }

                svsk->sk_reclen = ntohl(svsk->sk_reclen);
                if (!(svsk->sk_reclen & 0x80000000)) {
                        /* FIXME: technically, a record can be fragmented,
                         * and non-terminal fragments will not have the top
                         * bit set in the fragment length header.
                         * But apparently no known nfs clients send fragmented
                         * records. */
                        printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (non-terminal)\n",
                               (unsigned long) svsk->sk_reclen);
                        goto err_delete;
                }
                svsk->sk_reclen &= 0x7fffffff;
                dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
                if (svsk->sk_reclen > serv->sv_bufsz) {
                        printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
                               (unsigned long) svsk->sk_reclen);
                        goto err_delete;
                }
        }

        /* Check whether enough data is available */
        len = svc_recv_available(svsk);
        if (len < 0)
                goto error;

        if (len < svsk->sk_reclen) {
                dprintk("svc: incomplete TCP record (%d of %d)\n",
                        len, svsk->sk_reclen);
                svc_sock_received(svsk);
                return -EAGAIN; /* record not complete */
        }
        len = svsk->sk_reclen;
        set_bit(SK_DATA, &svsk->sk_flags);

        vec[0] = rqstp->rq_arg.head[0];
        vlen = PAGE_SIZE;
        pnum = 1;
        while (vlen < len) {
                vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]);
                vec[pnum].iov_len = PAGE_SIZE;
                pnum++;
                vlen += PAGE_SIZE;
        }

        /* Now receive data */
        len = svc_recvfrom(rqstp, vec, pnum, len);
        if (len < 0)
                goto error;

        dprintk("svc: TCP complete record (%d bytes)\n", len);
        rqstp->rq_arg.len = len;
        rqstp->rq_arg.page_base = 0;
        if (len <= rqstp->rq_arg.head[0].iov_len) {
                rqstp->rq_arg.head[0].iov_len = len;
                rqstp->rq_arg.page_len = 0;
        } else {
                rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
        }

        rqstp->rq_skbuff = NULL;
        rqstp->rq_prot = IPPROTO_TCP;

        /* Reset TCP read info */
        svsk->sk_reclen = 0;
        svsk->sk_tcplen = 0;

        svc_sock_received(svsk);
        if (serv->sv_stats)
                serv->sv_stats->nettcpcnt++;

        return len;

err_delete:
        svc_delete_socket(svsk);
        return -EAGAIN;

error:
        if (len == -EAGAIN) {
                dprintk("RPC: TCP recvfrom got EAGAIN\n");
                svc_sock_received(svsk);
        } else {
                printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
                       svsk->sk_server->sv_name, -len);
                goto err_delete;
        }

        return len;
}
/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
        struct xdr_buf *xbufp = &rqstp->rq_res;
        int sent;
        __be32 reclen;

        /* Set up the first element of the reply kvec.
         * Any other kvecs that may be in use have been taken
         * care of by the server implementation itself.
         */
        reclen = htonl(0x80000000|((xbufp->len) - 4));
        memcpy(xbufp->head[0].iov_base, &reclen, 4);

        if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
                return -ENOTCONN;

        sent = svc_sendto(rqstp, &rqstp->rq_res);
        if (sent != xbufp->len) {
                printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
                       rqstp->rq_sock->sk_server->sv_name,
                       (sent<0)?"got error":"sent only",
                       sent, xbufp->len);
                svc_delete_socket(rqstp->rq_sock);
                sent = -EAGAIN;
        }
        return sent;
}
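
/* Worked example of the marker computation above: for a 132-byte rq_res
 * (whose first four bytes hold the marker itself), xbufp->len - 4 = 128,
 * so reclen = htonl(0x80000080): last fragment, 128 payload bytes.
 */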
static void
svc_tcp_init(struct svc_sock *svsk)
{
        struct sock *sk = svsk->sk_sk;
        struct tcp_sock *tp = tcp_sk(sk);

        svsk->sk_recvfrom = svc_tcp_recvfrom;
        svsk->sk_sendto = svc_tcp_sendto;

        if (sk->sk_state == TCP_LISTEN) {
                dprintk("setting up TCP socket for listening\n");
                sk->sk_data_ready = svc_tcp_listen_data_ready;
                set_bit(SK_CONN, &svsk->sk_flags);
        } else {
                dprintk("setting up TCP socket for reading\n");
                sk->sk_state_change = svc_tcp_state_change;
                sk->sk_data_ready = svc_tcp_data_ready;
                sk->sk_write_space = svc_write_space;

                svsk->sk_reclen = 0;
                svsk->sk_tcplen = 0;

                tp->nonagle = 1;        /* disable Nagle's algorithm */

                /* Initial setting: the buffers must have enough space to
                 * receive and respond to one request.
                 * svc_tcp_recvfrom will re-adjust if necessary.
                 */
                svc_sock_setbufsize(svsk->sk_sock,
                                    3 * svsk->sk_server->sv_bufsz,
                                    3 * svsk->sk_server->sv_bufsz);

                set_bit(SK_CHNGBUF, &svsk->sk_flags);
                set_bit(SK_DATA, &svsk->sk_flags);
                if (sk->sk_state != TCP_ESTABLISHED)
                        set_bit(SK_CLOSE, &svsk->sk_flags);
        }
}
void
svc_sock_update_bufs(struct svc_serv *serv)
{
        /*
         * The number of server threads has changed. Update
         * rcvbuf and sndbuf accordingly on all sockets
         */
        struct list_head *le;

        spin_lock_bh(&serv->sv_lock);
        list_for_each(le, &serv->sv_permsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        list_for_each(le, &serv->sv_tempsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
        spin_unlock_bh(&serv->sv_lock);
}
/*
 * Receive the next request on any socket.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
        struct svc_sock *svsk = NULL;
        struct svc_serv *serv = rqstp->rq_server;
        int len;
        int pages;
        struct xdr_buf *arg;
        DECLARE_WAITQUEUE(wait, current);

        dprintk("svc: server %p waiting for data (to = %ld)\n",
                rqstp, timeout);

        if (rqstp->rq_sock)
                printk(KERN_ERR
                        "svc_recv: service %p, socket not NULL!\n",
                        rqstp);
        if (waitqueue_active(&rqstp->rq_wait))
                printk(KERN_ERR
                        "svc_recv: service %p, wait queue active!\n",
                        rqstp);

        /* Initialize the buffers */
        /* first reclaim pages that were moved to response list */
        svc_pushback_allpages(rqstp);

        /* now allocate needed pages.  If we get a failure, sleep briefly */
        pages = 2 + (serv->sv_bufsz + PAGE_SIZE - 1) / PAGE_SIZE;
        while (rqstp->rq_arghi < pages) {
                struct page *p = alloc_page(GFP_KERNEL);
                if (!p) {
                        schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                        continue;
                }
                rqstp->rq_argpages[rqstp->rq_arghi++] = p;
        }

        /* Make arg->head point to first page and arg->pages point to rest */
        arg = &rqstp->rq_arg;
        arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]);
        arg->head[0].iov_len = PAGE_SIZE;
        rqstp->rq_argused = 1;
        arg->pages = rqstp->rq_argpages + 1;
        arg->page_base = 0;
        /* save at least one page for response */
        arg->page_len = (pages-2)*PAGE_SIZE;
        arg->len = (pages-1)*PAGE_SIZE;
        arg->tail[0].iov_len = 0;

        try_to_freeze();
        cond_resched();
        if (signalled())
                return -EINTR;

        spin_lock_bh(&serv->sv_lock);
        if ((svsk = svc_sock_dequeue(serv)) != NULL) {
                rqstp->rq_sock = svsk;
                atomic_inc(&svsk->sk_inuse);
                rqstp->rq_reserved = serv->sv_bufsz;
                atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
        } else {
                /* No data pending. Go to sleep */
                svc_serv_enqueue(serv, rqstp);

                /*
                 * We have to be able to interrupt this wait
                 * to bring down the daemons ...
                 */
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&rqstp->rq_wait, &wait);
                spin_unlock_bh(&serv->sv_lock);

                schedule_timeout(timeout);

                try_to_freeze();

                spin_lock_bh(&serv->sv_lock);
                remove_wait_queue(&rqstp->rq_wait, &wait);

                if (!(svsk = rqstp->rq_sock)) {
                        svc_serv_dequeue(serv, rqstp);
                        spin_unlock_bh(&serv->sv_lock);
                        dprintk("svc: server %p, no data yet\n", rqstp);
                        return signalled()? -EINTR : -EAGAIN;
                }
        }
        spin_unlock_bh(&serv->sv_lock);

        dprintk("svc: server %p, socket %p, inuse=%d\n",
                rqstp, svsk, atomic_read(&svsk->sk_inuse));
        len = svsk->sk_recvfrom(rqstp);
        dprintk("svc: got len=%d\n", len);

        /* No data, incomplete (TCP) read, or accept() */
        if (len == 0 || len == -EAGAIN) {
                rqstp->rq_res.len = 0;
                svc_sock_release(rqstp);
                return -EAGAIN;
        }
        svsk->sk_lastrecv = get_seconds();
        clear_bit(SK_OLD, &svsk->sk_flags);

        rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024;
        rqstp->rq_chandle.defer = svc_defer;

        if (serv->sv_stats)
                serv->sv_stats->netcnt++;
        return len;
}
/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
        dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
        svc_sock_release(rqstp);
}
/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
        struct svc_sock *svsk;
        int len;
        struct xdr_buf *xb;

        if ((svsk = rqstp->rq_sock) == NULL) {
                printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
                       __FILE__, __LINE__);
                return -EFAULT;
        }

        /* release the receive skb before sending the reply */
        svc_release_skb(rqstp);

        /* calculate over-all length */
        xb = &rqstp->rq_res;
        xb->len = xb->head[0].iov_len +
                xb->page_len +
                xb->tail[0].iov_len;

        /* Grab svsk->sk_mutex to serialize outgoing data. */
        mutex_lock(&svsk->sk_mutex);
        if (test_bit(SK_DEAD, &svsk->sk_flags))
                len = -ENOTCONN;
        else
                len = svsk->sk_sendto(rqstp);
        mutex_unlock(&svsk->sk_mutex);
        svc_sock_release(rqstp);

        if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
                return 0;
        return len;
}
/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
        struct svc_serv *serv = (struct svc_serv *)closure;
        struct svc_sock *svsk;
        struct list_head *le, *next;
        LIST_HEAD(to_be_aged);

        dprintk("svc_age_temp_sockets\n");

        if (!spin_trylock_bh(&serv->sv_lock)) {
                /* busy, try again 1 sec later */
                dprintk("svc_age_temp_sockets: busy\n");
                mod_timer(&serv->sv_temptimer, jiffies + HZ);
                return;
        }

        list_for_each_safe(le, next, &serv->sv_tempsocks) {
                svsk = list_entry(le, struct svc_sock, sk_list);

                if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
                        continue;
                if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
                        continue;
                atomic_inc(&svsk->sk_inuse);
                list_move(le, &to_be_aged);
                set_bit(SK_CLOSE, &svsk->sk_flags);
                set_bit(SK_DETACHED, &svsk->sk_flags);
        }
        spin_unlock_bh(&serv->sv_lock);

        while (!list_empty(&to_be_aged)) {
                le = to_be_aged.next;
                /* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
                list_del_init(le);
                svsk = list_entry(le, struct svc_sock, sk_list);

                dprintk("queuing svsk %p for closing, %lu seconds old\n",
                        svsk, get_seconds() - svsk->sk_lastrecv);

                /* a thread will dequeue and close it soon */
                svc_sock_enqueue(svsk);
                svc_sock_put(svsk);
        }

        mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
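
/* To restate the sweep above: svc_recv() clears SK_OLD on every receive,
 * so the first pass through this timer only marks a socket; a socket
 * still marked SK_OLD on the next pass has been idle for a whole
 * svc_conn_age_period and, if unused and not busy, gets queued for
 * closing.
 */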
/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *
svc_setup_socket(struct svc_serv *serv, struct socket *sock,
                 int *errp, int pmap_register)
{
        struct svc_sock *svsk;
        struct sock *inet;

        dprintk("svc: svc_setup_socket %p\n", sock);
        if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
                *errp = -ENOMEM;
                return NULL;
        }

        inet = sock->sk;

        /* Register socket with portmapper */
        if (*errp >= 0 && pmap_register)
                *errp = svc_register(serv, inet->sk_protocol,
                                     ntohs(inet_sk(inet)->sport));

        if (*errp < 0) {
                kfree(svsk);
                return NULL;
        }

        set_bit(SK_BUSY, &svsk->sk_flags);
        inet->sk_user_data = svsk;
        svsk->sk_sock = sock;
        svsk->sk_sk = inet;
        svsk->sk_ostate = inet->sk_state_change;
        svsk->sk_odata = inet->sk_data_ready;
        svsk->sk_owspace = inet->sk_write_space;
        svsk->sk_server = serv;
        atomic_set(&svsk->sk_inuse, 0);
        svsk->sk_lastrecv = get_seconds();
        spin_lock_init(&svsk->sk_defer_lock);
        INIT_LIST_HEAD(&svsk->sk_deferred);
        INIT_LIST_HEAD(&svsk->sk_ready);
        mutex_init(&svsk->sk_mutex);

        /* Initialize the socket */
        if (sock->type == SOCK_DGRAM)
                svc_udp_init(svsk);
        else
                svc_tcp_init(svsk);

        spin_lock_bh(&serv->sv_lock);
        if (!pmap_register) {
                set_bit(SK_TEMP, &svsk->sk_flags);
                list_add(&svsk->sk_list, &serv->sv_tempsocks);
                serv->sv_tmpcnt++;
                if (serv->sv_temptimer.function == NULL) {
                        /* setup timer to age temp sockets */
                        setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
                                    (unsigned long)serv);
                        mod_timer(&serv->sv_temptimer,
                                  jiffies + svc_conn_age_period * HZ);
                }
        } else {
                clear_bit(SK_TEMP, &svsk->sk_flags);
                list_add(&svsk->sk_list, &serv->sv_permsocks);
        }
        spin_unlock_bh(&serv->sv_lock);

        dprintk("svc: svc_setup_socket created %p (inet %p)\n",
                svsk, svsk->sk_sk);

        clear_bit(SK_BUSY, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
        return svsk;
}
int svc_addsock(struct svc_serv *serv,
                int fd,
                char *name_return,
                int *proto)
{
        int err = 0;
        struct socket *so = sockfd_lookup(fd, &err);
        struct svc_sock *svsk = NULL;

        if (!so)
                return err;
        if (so->sk->sk_family != AF_INET)
                err = -EAFNOSUPPORT;
        else if (so->sk->sk_protocol != IPPROTO_TCP &&
                 so->sk->sk_protocol != IPPROTO_UDP)
                err = -EPROTONOSUPPORT;
        else if (so->state > SS_UNCONNECTED)
                err = -EISCONN;
        else {
                svsk = svc_setup_socket(serv, so, &err, 1);
                if (svsk)
                        err = 0;
        }
        if (err) {
                sockfd_put(so);
                return err;
        }
        if (proto)
                *proto = so->sk->sk_protocol;
        return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);
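
/* For context (our understanding of the expected caller, not enforced
 * here): nfsd's "portlist" control file accepts an already-bound socket
 * fd from user space and hands it to svc_addsock, which is why the
 * socket must be unconnected and AF_INET, and why the bound name is
 * reported back via name_return.
 */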
/*
 * Create socket for RPC service.
 */
static int
svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
{
        struct svc_sock *svsk;
        struct socket *sock;
        int error;
        int type;

        dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n",
                serv->sv_program->pg_name, protocol,
                NIPQUAD(sin->sin_addr.s_addr),
                ntohs(sin->sin_port));

        if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
                printk(KERN_WARNING "svc: only UDP and TCP "
                       "sockets supported\n");
                return -EINVAL;
        }
        type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

        if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
                return error;

        if (type == SOCK_STREAM)
                sock->sk->sk_reuse = 1; /* allow address reuse */
        error = kernel_bind(sock, (struct sockaddr *) sin,
                            sizeof(*sin));
        if (error < 0)
                goto bummer;

        if (protocol == IPPROTO_TCP) {
                if ((error = kernel_listen(sock, 64)) < 0)
                        goto bummer;
        }

        if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL)
                return 0;

bummer:
        dprintk("svc: svc_create_socket error = %d\n", -error);
        sock_release(sock);
        return error;
}
/*
 * Remove a dead socket
 */
void
svc_delete_socket(struct svc_sock *svsk)
{
        struct svc_serv *serv;
        struct sock *sk;

        dprintk("svc: svc_delete_socket(%p)\n", svsk);

        serv = svsk->sk_server;
        sk = svsk->sk_sk;

        sk->sk_state_change = svsk->sk_ostate;
        sk->sk_data_ready = svsk->sk_odata;
        sk->sk_write_space = svsk->sk_owspace;

        spin_lock_bh(&serv->sv_lock);

        if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
                list_del_init(&svsk->sk_list);
        list_del_init(&svsk->sk_ready);
        if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags))
                if (test_bit(SK_TEMP, &svsk->sk_flags))
                        serv->sv_tmpcnt--;

        if (!atomic_read(&svsk->sk_inuse)) {
                spin_unlock_bh(&serv->sv_lock);
                if (svsk->sk_sock->file)
                        sockfd_put(svsk->sk_sock);
                else
                        sock_release(svsk->sk_sock);
                kfree(svsk);
        } else {
                spin_unlock_bh(&serv->sv_lock);
                dprintk(KERN_NOTICE "svc: server socket destroy delayed\n");
                /* svsk->sk_server = NULL; */
        }
}
/*
 * Make a socket for nfsd and lockd
 */
int
svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
{
        struct sockaddr_in sin;

        dprintk("svc: creating socket proto = %d\n", protocol);
        sin.sin_family      = AF_INET;
        sin.sin_addr.s_addr = INADDR_ANY;
        sin.sin_port        = htons(port);
        return svc_create_socket(serv, protocol, &sin);
}
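
/* Example usage (a sketch of the pattern nfsd and lockd follow; the
 * variable names are illustrative):
 *
 *      error = svc_makesock(serv, IPPROTO_UDP, port);
 *      if (!error)
 *              error = svc_makesock(serv, IPPROTO_TCP, port);
 *
 * creating one UDP and one TCP listener on the same port.
 */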
/*
 * Handle defer and revisit of requests
 */
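/* Lifecycle sketch of the functions below: svc_defer() snapshots the
 * (at most single-page) request into a svc_deferred_req and hands it to
 * the cache layer; when the cache is ready it calls svc_revisit(),
 * which puts the snapshot on the socket's sk_deferred list and sets
 * SK_DEFERRED; svc_udp_recvfrom/svc_tcp_recvfrom then pick it up via
 * svc_deferred_dequeue() and replay it with svc_deferred_recv().
 */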
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
        struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
        struct svc_sock *svsk;

        if (too_many) {
                svc_sock_put(dr->svsk);
                kfree(dr);
                return;
        }
        dprintk("revisit queued\n");
        svsk = dr->svsk;
        dr->svsk = NULL;
        spin_lock_bh(&svsk->sk_defer_lock);
        list_add(&dr->handle.recent, &svsk->sk_deferred);
        spin_unlock_bh(&svsk->sk_defer_lock);
        set_bit(SK_DEFERRED, &svsk->sk_flags);
        svc_sock_enqueue(svsk);
        svc_sock_put(svsk);
}
static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
        struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
        int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
        struct svc_deferred_req *dr;

        if (rqstp->rq_arg.page_len)
                return NULL; /* if more than a page, give up FIXME */
        if (rqstp->rq_deferred) {
                dr = rqstp->rq_deferred;
                rqstp->rq_deferred = NULL;
        } else {
                int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
                /* FIXME maybe discard if size too large */
                dr = kmalloc(size, GFP_KERNEL);
                if (dr == NULL)
                        return NULL;

                dr->handle.owner = rqstp->rq_server;
                dr->prot = rqstp->rq_prot;
                dr->addr = rqstp->rq_addr;
                dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
        }
        atomic_inc(&rqstp->rq_sock->sk_inuse);
        dr->svsk = rqstp->rq_sock;

        dr->handle.revisit = svc_revisit;
        return &dr->handle;
}
/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
        struct svc_deferred_req *dr = rqstp->rq_deferred;

        rqstp->rq_arg.head[0].iov_base = dr->args;
        rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
        rqstp->rq_arg.page_len = 0;
        rqstp->rq_arg.len = dr->argslen<<2;
        rqstp->rq_prot = dr->prot;
        rqstp->rq_addr = dr->addr;
        rqstp->rq_daddr = dr->daddr;
        return dr->argslen<<2;
}
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
        struct svc_deferred_req *dr = NULL;

        if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
                return NULL;
        spin_lock_bh(&svsk->sk_defer_lock);
        clear_bit(SK_DEFERRED, &svsk->sk_flags);
        if (!list_empty(&svsk->sk_deferred)) {
                dr = list_entry(svsk->sk_deferred.next,
                                struct svc_deferred_req,
                                handle.recent);
                list_del_init(&dr->handle.recent);
                set_bit(SK_DEFERRED, &svsk->sk_flags);
        }
        spin_unlock_bh(&svsk->sk_defer_lock);
        return dr;
}