/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat
 * TCP send fixes (C) 1998 Red Hat
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 *
 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
 *  <gilles.quillard@bull.net>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
#ifdef CONFIG_NFS_V4_1
#include <linux/sunrpc/bc_xprt.h>
#endif

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>

#include "sunrpc.h"

/*
 * xprtsock tunables
 */
unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;

unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

#define XS_TCP_LINGER_TO	(15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;

/*
 * We can register our own files under /proc/sys/sunrpc by
 * calling register_sysctl_table() again.  The files in that
 * directory become the union of all files registered there.
 *
 * We simply need to make sure that we don't collide with
 * someone else's file names!
 */

#ifdef RPC_DEBUG

static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;

static struct ctl_table_header *sunrpc_table_header;

/*
 * FIXME: changing the UDP slot table size should also resize the UDP
 *        socket buffers for existing UDP transports
 */
static ctl_table xs_tunables_table[] = {
	{
		.ctl_name	= CTL_SLOTTABLE_UDP,
		.procname	= "udp_slot_table_entries",
		.data		= &xprt_udp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.ctl_name	= CTL_SLOTTABLE_TCP,
		.procname	= "tcp_slot_table_entries",
		.data		= &xprt_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.ctl_name	= CTL_MIN_RESVPORT,
		.procname	= "min_resvport",
		.data		= &xprt_min_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.ctl_name	= CTL_MAX_RESVPORT,
		.procname	= "max_resvport",
		.data		= &xprt_max_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.procname	= "tcp_fin_timeout",
		.data		= &xs_tcp_fin_timeout,
		.maxlen		= sizeof(xs_tcp_fin_timeout),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= sysctl_jiffies
	},
	{
		.ctl_name = 0,
	},
};

static ctl_table sunrpc_table[] = {
	{
		.ctl_name	= CTL_SUNRPC,
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xs_tunables_table
	},
	{
		.ctl_name = 0,
	},
};

#endif
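/*
 * Usage sketch (illustrative, not part of the original file): with
 * RPC_DEBUG enabled, the tunables above appear as ordinary procfs
 * files, subject to the min/max limits wired in via extra1/extra2:
 *
 *	# cat /proc/sys/sunrpc/tcp_slot_table_entries
 *	16
 *	# echo 64 > /proc/sys/sunrpc/tcp_slot_table_entries
 */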
/*
 * Time out for an RPC UDP socket connect.  UDP socket connects are
 * synchronous, but we set a timeout anyway in case of resource
 * exhaustion on the local host.
 */
#define XS_UDP_CONN_TO		(5U * HZ)

/*
 * Wait duration for an RPC TCP connection to be established.  Solaris
 * NFS over TCP uses 60 seconds, for example, which is in line with how
 * long a server takes to reboot.
 */
#define XS_TCP_CONN_TO		(60U * HZ)

/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO		(60U * HZ)

/*
 * Delay if a UDP socket connect error occurs.  This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO		(2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server.  Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO	(3U * HZ)
#define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)
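/*
 * A minimal sketch of the backoff policy described above (assumed;
 * the actual doubling is applied where reconnects are scheduled, and
 * this helper exists purely for illustration).  The delay runs
 * 3s, 6s, 12s, ... and is clamped at 5 minutes.
 */
static inline unsigned long xs_backoff_sketch(unsigned long reest_to)
{
	reest_to <<= 1;				/* double the delay */
	if (reest_to > XS_TCP_MAX_REEST_TO)	/* clamp at XS_TCP_MAX_REEST_TO */
		reest_to = XS_TCP_MAX_REEST_TO;
	return reest_to;
}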
/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long.  Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)

#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC:       %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif
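/*
 * Dump layout example (illustrative): xs_pktdump() prints at most 128
 * bytes, 32 per line, as 4-byte hex groups preceded by the offset of
 * the line, e.g. "0x0000 80000060 0a0b0c0d ...".
 */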
struct sock_xprt {
	struct rpc_xprt		xprt;

	/*
	 * Network layer
	 */
	struct socket *		sock;
	struct sock *		inet;

	/*
	 * State of TCP reply receive
	 */
	__be32			tcp_fraghdr,
				tcp_xid;

	u32			tcp_offset,
				tcp_reclen;

	unsigned long		tcp_copied,
				tcp_flags;

	/*
	 * Connection of transports
	 */
	struct delayed_work	connect_worker;
	struct sockaddr_storage	srcaddr;
	unsigned short		srcport;

	/*
	 * UDP socket buffer size parameters
	 */
	size_t			rcvsize,
				sndsize;

	/*
	 * Saved socket callback addresses
	 */
	void			(*old_data_ready)(struct sock *, int);
	void			(*old_state_change)(struct sock *);
	void			(*old_write_space)(struct sock *);
	void			(*old_error_report)(struct sock *);
};

/*
 * TCP receive state flags
 */
#define TCP_RCV_LAST_FRAG	(1UL << 0)
#define TCP_RCV_COPY_FRAGHDR	(1UL << 1)
#define TCP_RCV_COPY_XID	(1UL << 2)
#define TCP_RCV_COPY_DATA	(1UL << 3)
#define TCP_RCV_READ_CALLDIR	(1UL << 4)
#define TCP_RCV_COPY_CALLDIR	(1UL << 5)

/*
 * TCP RPC flags
 */
#define TCP_RPC_REPLY		(1UL << 6)
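/*
 * Note (editorial): TCP_RPC_REPLY lives in the same tcp_flags word as
 * the receive state bits above, which is why it takes the next free
 * bit (6) rather than starting a fresh bitmask.
 */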
static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
	return (struct sockaddr *) &xprt->addr;
}

static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in *) &xprt->addr;
}

static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in6 *) &xprt->addr;
}

static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	char buf[128];

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	switch (sap->sa_family) {
	case AF_INET:
		sin = xs_addr_in(xprt);
		(void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
			       NIPQUAD(sin->sin_addr.s_addr));
		break;
	case AF_INET6:
		sin6 = xs_addr_in6(xprt);
		(void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
		break;
	default:
		BUG();
	}
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	char buf[128];

	(void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	(void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}
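/*
 * Worked example (illustrative): for the standard NFS port 2049 the
 * strings above come out as RPC_DISPLAY_PORT = "2049" and, since
 * 2049 == 0x801, RPC_DISPLAY_HEX_PORT = " 801"; note that "%4hx"
 * pads with spaces, not zeroes.
 */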
static void xs_format_peer_addresses(struct rpc_xprt *xprt,
				     const char *protocol,
				     const char *netid)
{
	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
	xs_format_common_peer_addresses(xprt);
	xs_format_common_peer_ports(xprt);
}

static void xs_update_peer_port(struct rpc_xprt *xprt)
{
	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_format_common_peer_ports(xprt);
}

static void xs_free_peer_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)

static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
{
	struct msghdr msg = {
		.msg_name	= addr,
		.msg_namelen	= addrlen,
		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
	};
	struct kvec iov = {
		.iov_base	= vec->iov_base + base,
		.iov_len	= vec->iov_len - base,
	};

	if (iov.iov_len != 0)
		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
}

static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
{
	struct page **ppage;
	unsigned int remainder;
	int err, sent = 0;

	remainder = xdr->page_len - base;
	base += xdr->page_base;
	ppage = xdr->pages + (base >> PAGE_SHIFT);
	base &= ~PAGE_MASK;
	for (;;) {
		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
		int flags = XS_SENDMSG_FLAGS;

		remainder -= len;
		if (remainder != 0 || more)
			flags |= MSG_MORE;
		err = sock->ops->sendpage(sock, *ppage, base, len, flags);
		if (remainder == 0 || err != len)
			break;
		sent += err;
		ppage++;
		base = 0;
	}
	if (sent == 0)
		return err;
	if (err > 0)
		sent += err;
	return sent;
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 *
 */
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
{
	unsigned int remainder = xdr->len - base;
	int err, sent = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
	if (base != 0) {
		addr = NULL;
		addrlen = 0;
	}

	if (base < xdr->head[0].iov_len || addr != NULL) {
		unsigned int len = xdr->head[0].iov_len - base;
		remainder -= len;
		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		sent += err;
		base = 0;
	} else
		base -= xdr->head[0].iov_len;

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;
		remainder -= len;
		err = xs_send_pagedata(sock, xdr, base, remainder != 0);
		if (remainder == 0 || err != len)
			goto out;
		sent += err;
		base = 0;
	} else
		base -= xdr->page_len;

	if (base >= xdr->tail[0].iov_len)
		return sent;
	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
out:
	if (sent == 0)
		return err;
	if (err > 0)
		sent += err;
	return sent;
}
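/*
 * Resume example (illustrative, assumed sizes): with head.iov_len =
 * 100, page_len = 4096 and tail.iov_len = 16, a retry that passes
 * base = 2148 skips the whole head (base becomes 2048), sends the
 * remaining 2048 bytes of page data, then the 16-byte tail; this is
 * how a partially-sent request picks up where it left off.
 */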
static void xs_nospace_callback(struct rpc_task *task)
{
	struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);

	transport->inet->sk_write_pending--;
	clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
}

/**
 * xs_nospace - place task on wait queue if transmit was incomplete
 * @task: task to put to sleep
 *
 */
static int xs_nospace(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	int ret = 0;

	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
			req->rq_slen);

	/* Protect against races with write_space */
	spin_lock_bh(&xprt->transport_lock);

	/* Don't race with disconnect */
	if (xprt_connected(xprt)) {
		if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
			ret = -EAGAIN;
			/*
			 * Notify TCP that we're limited by the application
			 * window size
			 */
			set_bit(SOCK_NOSPACE, &transport->sock->flags);
			transport->inet->sk_write_pending++;
			/* ...and wait for more buffer space */
			xprt_wait_for_buffer_space(task, xs_nospace_callback);
		}
	} else {
		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
		ret = -ENOTCONN;
	}

	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}
/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	if (!xprt_bound(xprt))
		return -ENOTCONN;
	status = xs_sendpages(transport->sock,
			      xs_addr(xprt),
			      xprt->addrlen, xdr,
			      req->rq_bytes_sent);

	dprintk("RPC:       xs_udp_send_request(%u) = %d\n",
			xdr->len - req->rq_bytes_sent, status);

	if (status >= 0) {
		task->tk_bytes_sent += status;
		if (status >= req->rq_slen)
			return 0;
		/* Still some bytes left; set up for a retry later. */
		status = -EAGAIN;
	}
	if (!transport->sock)
		goto out;

	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	default:
		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
			-status);
	case -ENETUNREACH:
	case -EPIPE:
	case -ECONNREFUSED:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
	}
out:
	return status;
}
/**
 * xs_tcp_shutdown - gracefully shut down a TCP socket
 * @xprt: transport
 *
 * Initiates a graceful shutdown of the TCP socket by calling the
 * equivalent of shutdown(SHUT_WR);
 */
static void xs_tcp_shutdown(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct socket *sock = transport->sock;

	if (sock != NULL)
		kernel_sock_shutdown(sock, SHUT_WR);
}

static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
{
	u32 reclen = buf->len - sizeof(rpc_fraghdr);
	rpc_fraghdr *base = buf->head[0].iov_base;

	*base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
}
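/*
 * Worked example (illustrative): for a 100-byte send buffer the
 * record length is 100 - 4 = 96 (0x60), so the marker written over
 * the first four bytes is htonl(0x80000000 | 0x60) = 0x80000060:
 * the "last fragment" bit plus the fragment length.
 */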
/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @task: address of RPC task that manages the state of an RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int status;

	xs_encode_tcp_record_marker(&req->rq_snd_buf);

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	/* Continue transmitting the packet/record. We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	while (1) {
		status = xs_sendpages(transport->sock,
					NULL, 0, xdr, req->rq_bytes_sent);

		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
				xdr->len - req->rq_bytes_sent, status);

		if (unlikely(status < 0))
			break;

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		req->rq_bytes_sent += status;
		task->tk_bytes_sent += status;
		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
			req->rq_bytes_sent = 0;
			return 0;
		}

		if (status != 0)
			continue;
		status = -EAGAIN;
		break;
	}
	if (!transport->sock)
		goto out;

	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(task);
		break;
	default:
		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
			-status);
	case -ECONNRESET:
	case -EPIPE:
		xs_tcp_shutdown(xprt);
	case -ECONNREFUSED:
	case -ENOTCONN:
		clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
	}
out:
	return status;
}
/**
 * xs_tcp_release_xprt - clean up after a tcp transmission
 * @xprt: transport
 * @task: rpc task
 *
 * This cleans up if an error causes us to abort the transmission of a request.
 * In this case, the socket may need to be reset in order to avoid confusing
 * the server.
 */
static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	if (task != xprt->snd_task)
		return;
	if (task == NULL)
		goto out_release;
	req = task->tk_rqstp;
	if (req->rq_bytes_sent == 0)
		goto out_release;
	if (req->rq_bytes_sent == req->rq_snd_buf.len)
		goto out_release;
	set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
out_release:
	xprt_release_xprt(xprt, task);
}

static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	transport->old_data_ready = sk->sk_data_ready;
	transport->old_state_change = sk->sk_state_change;
	transport->old_write_space = sk->sk_write_space;
	transport->old_error_report = sk->sk_error_report;
}

static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	sk->sk_data_ready = transport->old_data_ready;
	sk->sk_state_change = transport->old_state_change;
	sk->sk_write_space = transport->old_write_space;
	sk->sk_error_report = transport->old_error_report;
}

static void xs_reset_transport(struct sock_xprt *transport)
{
	struct socket *sock = transport->sock;
	struct sock *sk = transport->inet;

	if (sk == NULL)
		return;

	write_lock_bh(&sk->sk_callback_lock);
	transport->inet = NULL;
	transport->sock = NULL;

	sk->sk_user_data = NULL;

	xs_restore_old_callbacks(transport, sk);
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_no_check = 0;

	sock_release(sock);
}
/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; i.e., no DRC state remains
 * on the server we want to save.
 *
 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
 * xs_reset_transport() zeroing the socket from underneath a writer.
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	dprintk("RPC:       xs_close xprt %p\n", xprt);

	xs_reset_transport(transport);
	xprt->reestablish_timeout = 0;

	smp_mb__before_clear_bit();
	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	clear_bit(XPRT_CLOSING, &xprt->state);
	smp_mb__after_clear_bit();
	xprt_disconnect_done(xprt);
}

static void xs_tcp_close(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
		xs_close(xprt);
	else
		xs_tcp_shutdown(xprt);
}
/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	dprintk("RPC:       xs_destroy xprt %p\n", xprt);

	cancel_rearming_delayed_work(&transport->connect_worker);

	xs_close(xprt);
	xs_free_peer_addresses(xprt);
	kfree(xprt->slot);
	kfree(xprt);
	module_put(THIS_MODULE);
}

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

/**
 * xs_udp_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 * @len: how much data to read
 *
 */
static void xs_udp_data_ready(struct sock *sk, int len)
{
	struct rpc_task *task;
	struct rpc_xprt *xprt;
	struct rpc_rqst *rovr;
	struct sk_buff *skb;
	int err, repsize, copied;
	u32 _xid;
	__be32 *xp;

	read_lock(&sk->sk_callback_lock);
	dprintk("RPC:       xs_udp_data_ready...\n");
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
		goto out;

	if (xprt->shutdown)
		goto dropit;

	repsize = skb->len - sizeof(struct udphdr);
	if (repsize < 4) {
		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
		goto dropit;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, sizeof(struct udphdr),
				sizeof(_xid), &_xid);
	if (xp == NULL)
		goto dropit;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->transport_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	task = rovr->rq_task;

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
		UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
		goto out_unlock;
	}

	UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);

	/* Something worked... */
	dst_confirm(skb_dst(skb));

	xprt_adjust_cwnd(task, copied);
	xprt_update_rtt(task);
	xprt_complete_rqst(task, copied);

out_unlock:
	spin_unlock(&xprt->transport_lock);
dropit:
	skb_free_datagram(sk, skb);
out:
	read_unlock(&sk->sk_callback_lock);
}

static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	size_t len, used;
	char *p;

	p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
	len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;

	transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
	if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
		transport->tcp_flags |= TCP_RCV_LAST_FRAG;
	else
		transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
	transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;

	transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
	transport->tcp_offset = 0;

	/* Sanity check of the record length */
	if (unlikely(transport->tcp_reclen < 8)) {
		dprintk("RPC:       invalid TCP record fragment length\n");
		xprt_force_disconnect(xprt);
		return;
	}
	dprintk("RPC:       reading TCP record fragment of length %d\n",
			transport->tcp_reclen);
}

static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
{
	if (transport->tcp_offset == transport->tcp_reclen) {
		transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
		transport->tcp_offset = 0;
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
			transport->tcp_flags |= TCP_RCV_COPY_XID;
			transport->tcp_copied = 0;
		}
	}
}

static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
{
	size_t len, used;
	char *p;

	len = sizeof(transport->tcp_xid) - transport->tcp_offset;
	dprintk("RPC:       reading XID (%Zu bytes)\n", len);
	p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
	used = xdr_skb_read_bits(desc, p, len);
	transport->tcp_offset += used;
	if (used != len)
		return;
	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
	transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
	transport->tcp_copied = 4;
	dprintk("RPC:       reading %s XID %08x\n",
			(transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
							      : "request with",
			ntohl(transport->tcp_xid));
	xs_tcp_check_fraghdr(transport);
}
static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
				       struct xdr_skb_reader *desc)
{
	size_t len, used;
	u32 offset;
	__be32 calldir;

	/*
	 * We want transport->tcp_offset to be 8 at the end of this routine
	 * (4 bytes for the xid and 4 bytes for the call/reply flag).
	 * When this function is called for the first time,
	 * transport->tcp_offset is 4 (after having already read the xid).
	 */
	offset = transport->tcp_offset - sizeof(transport->tcp_xid);
	len = sizeof(calldir) - offset;
	dprintk("RPC:       reading CALL/REPLY flag (%Zu bytes)\n", len);
	used = xdr_skb_read_bits(desc, &calldir, len);
	transport->tcp_offset += used;
	if (used != len)
		return;
	transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
	transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
	transport->tcp_flags |= TCP_RCV_COPY_DATA;
	/*
	 * We don't yet have the XDR buffer, so we will write the calldir
	 * out after we get the buffer from the 'struct rpc_rqst'
	 */
	if (ntohl(calldir) == RPC_REPLY)
		transport->tcp_flags |= TCP_RPC_REPLY;
	else
		transport->tcp_flags &= ~TCP_RPC_REPLY;
	dprintk("RPC:       reading %s CALL/REPLY flag %08x\n",
			(transport->tcp_flags & TCP_RPC_REPLY) ?
				"reply for" : "request with", calldir);
	xs_tcp_check_fraghdr(transport);
}
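/*
 * Offset example (illustrative): on first entry tcp_offset is 4, so
 * offset = 0 and len = 4; if the skb held only two of those bytes,
 * tcp_offset advances to 6 and the next call resumes with offset = 2
 * and len = 2 until the whole call/reply flag has been read.
 */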
static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
				      struct xdr_skb_reader *desc,
				      struct rpc_rqst *req)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *rcvbuf;
	size_t len;
	ssize_t r;

	rcvbuf = &req->rq_private_buf;

	if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
		/*
		 * Save the RPC direction in the XDR buffer
		 */
		__be32 calldir = transport->tcp_flags & TCP_RPC_REPLY ?
					htonl(RPC_REPLY) : 0;

		memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
			&calldir, sizeof(calldir));
		transport->tcp_copied += sizeof(calldir);
		transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
	}

	len = desc->count;
	if (len > transport->tcp_reclen - transport->tcp_offset) {
		struct xdr_skb_reader my_desc;

		len = transport->tcp_reclen - transport->tcp_offset;
		memcpy(&my_desc, desc, sizeof(my_desc));
		my_desc.count = len;
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					      &my_desc, xdr_skb_read_bits);
		desc->count -= r;
		desc->offset += r;
	} else
		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
					      desc, xdr_skb_read_bits);

	if (r > 0) {
		transport->tcp_copied += r;
		transport->tcp_offset += r;
	}
	if (r != len) {
		/* Error when copying to the receive buffer,
		 * usually because we weren't able to allocate
		 * additional buffer pages. All we can do now
		 * is turn off TCP_RCV_COPY_DATA, so the request
		 * will not receive any additional updates,
		 * and time out.
		 * Any remaining data from this record will
		 * be discarded.
		 */
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
		dprintk("RPC:       XID %08x truncated request\n",
				ntohl(transport->tcp_xid));
		dprintk("RPC:       xprt = %p, tcp_copied = %lu, "
				"tcp_offset = %u, tcp_reclen = %u\n",
				xprt, transport->tcp_copied,
				transport->tcp_offset, transport->tcp_reclen);
		return;
	}

	dprintk("RPC:       XID %08x read %Zd bytes\n",
			ntohl(transport->tcp_xid), r);
	dprintk("RPC:       xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
			"tcp_reclen = %u\n", xprt, transport->tcp_copied,
			transport->tcp_offset, transport->tcp_reclen);

	if (transport->tcp_copied == req->rq_private_buf.buflen)
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	else if (transport->tcp_offset == transport->tcp_reclen) {
		if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	}

	return;
}

/*
 * Finds the request corresponding to the RPC xid and invokes the common
 * tcp read code to read the data.
 */
static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
				    struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct rpc_rqst *req;

	dprintk("RPC:       read reply XID %08x\n", ntohl(transport->tcp_xid));

	/* Find and lock the request corresponding to this xid */
	spin_lock(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
	if (!req) {
		dprintk("RPC:       XID %08x request not found!\n",
				ntohl(transport->tcp_xid));
		spin_unlock(&xprt->transport_lock);
		return -1;
	}

	xs_tcp_read_common(xprt, desc, req);

	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
		xprt_complete_rqst(req->rq_task, transport->tcp_copied);

	spin_unlock(&xprt->transport_lock);
	return 0;
}
#if defined(CONFIG_NFS_V4_1)
/*
 * Obtains an rpc_rqst previously allocated and invokes the common
 * tcp read code to read the data.  The result is placed in the callback
 * queue.
 * If we're unable to obtain the rpc_rqst we schedule the closing of the
 * connection and return -1.
 */
static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
				       struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct rpc_rqst *req;

	req = xprt_alloc_bc_request(xprt);
	if (req == NULL) {
		printk(KERN_WARNING "Callback slot table overflowed\n");
		xprt_force_disconnect(xprt);
		return -1;
	}

	req->rq_xid = transport->tcp_xid;
	dprintk("RPC:       read callback XID %08x\n", ntohl(req->rq_xid));
	xs_tcp_read_common(xprt, desc, req);

	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
		struct svc_serv *bc_serv = xprt->bc_serv;

		/*
		 * Add callback request to callback list.  The callback
		 * service sleeps on the sv_cb_waitq waiting for new
		 * requests.  Wake it up after enqueuing the request.
		 */
		dprintk("RPC:       add callback request to list\n");
		spin_lock(&bc_serv->sv_cb_lock);
		list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
		spin_unlock(&bc_serv->sv_cb_lock);
		wake_up(&bc_serv->sv_cb_waitq);
	}

	req->rq_private_buf.len = transport->tcp_copied;

	return 0;
}

static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
				    struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);

	return (transport->tcp_flags & TCP_RPC_REPLY) ?
		xs_tcp_read_reply(xprt, desc) :
		xs_tcp_read_callback(xprt, desc);
}
#else
static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
				    struct xdr_skb_reader *desc)
{
	return xs_tcp_read_reply(xprt, desc);
}
#endif /* CONFIG_NFS_V4_1 */
/*
 * Read data off the transport.  This can be either an RPC_CALL or an
 * RPC_REPLY.  Relay the processing to helper functions.
 */
static void xs_tcp_read_data(struct rpc_xprt *xprt,
			     struct xdr_skb_reader *desc)
{
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);

	if (_xs_tcp_read_data(xprt, desc) == 0)
		xs_tcp_check_fraghdr(transport);
	else {
		/*
		 * The transport_lock protects the request handling.
		 * There's no need to hold it to update the tcp_flags.
		 */
		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
	}
}

static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
{
	size_t len;

	len = transport->tcp_reclen - transport->tcp_offset;
	if (len > desc->count)
		len = desc->count;
	desc->count -= len;
	desc->offset += len;
	transport->tcp_offset += len;
	dprintk("RPC:       discarded %Zu bytes\n", len);
	xs_tcp_check_fraghdr(transport);
}

static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
{
	struct rpc_xprt *xprt = rd_desc->arg.data;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_skb_reader desc = {
		.skb	= skb,
		.offset	= offset,
		.count	= len,
	};

	dprintk("RPC:       xs_tcp_data_recv started\n");
	do {
		/* Read in a new fragment marker if necessary */
		/* Can we ever really expect to get completely empty fragments? */
		if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
			xs_tcp_read_fraghdr(xprt, &desc);
			continue;
		}
		/* Read in the xid if necessary */
		if (transport->tcp_flags & TCP_RCV_COPY_XID) {
			xs_tcp_read_xid(transport, &desc);
			continue;
		}
		/* Read in the call/reply flag */
		if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
			xs_tcp_read_calldir(transport, &desc);
			continue;
		}
		/* Read in the request data */
		if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
			xs_tcp_read_data(xprt, &desc);
			continue;
		}
		/* Skip over any trailing bytes on short reads */
		xs_tcp_read_discard(transport, &desc);
	} while (desc.count);
	dprintk("RPC:       xs_tcp_data_recv done\n");
	return len - desc.count;
}
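/*
 * The loop above is a small state machine driven by tcp_flags:
 * TCP_RCV_COPY_FRAGHDR, then TCP_RCV_COPY_XID, then
 * TCP_RCV_READ_CALLDIR, then TCP_RCV_COPY_DATA, with
 * xs_tcp_read_discard() consuming whatever is left of a fragment
 * once the interesting bytes have been copied out.
 */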
/**
 * xs_tcp_data_ready - "data ready" callback for TCP sockets
 * @sk: socket with data to read
 * @bytes: how much data to read
 *
 */
static void xs_tcp_data_ready(struct sock *sk, int bytes)
{
	struct rpc_xprt *xprt;
	read_descriptor_t rd_desc;
	int read;

	dprintk("RPC:       xs_tcp_data_ready...\n");

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	if (xprt->shutdown)
		goto out;

	/* Any data means we had a useful conversation, so
	 * we don't need to delay the next reconnect
	 */
	if (xprt->reestablish_timeout)
		xprt->reestablish_timeout = 0;

	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
	rd_desc.arg.data = xprt;
	do {
		rd_desc.count = 65536;
		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
	} while (read > 0);
out:
	read_unlock(&sk->sk_callback_lock);
}
/*
 * Do the equivalent of linger/linger2 handling for dealing with
 * broken servers that don't close the socket in a timely
 * fashion
 */
static void xs_tcp_schedule_linger_timeout(struct rpc_xprt *xprt,
		unsigned long timeout)
{
	struct sock_xprt *transport;

	if (xprt_test_and_set_connecting(xprt))
		return;
	set_bit(XPRT_CONNECTION_ABORT, &xprt->state);
	transport = container_of(xprt, struct sock_xprt, xprt);
	queue_delayed_work(rpciod_workqueue, &transport->connect_worker,
			   timeout);
}

static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport;

	transport = container_of(xprt, struct sock_xprt, xprt);

	if (!test_bit(XPRT_CONNECTION_ABORT, &xprt->state) ||
	    !cancel_delayed_work(&transport->connect_worker))
		return;
	clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
	xprt_clear_connecting(xprt);
}

static void xs_sock_mark_closed(struct rpc_xprt *xprt)
{
	smp_mb__before_clear_bit();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	clear_bit(XPRT_CLOSING, &xprt->state);
	smp_mb__after_clear_bit();
	/* Mark transport as closed and wake up all pending tasks */
	xprt_disconnect_done(xprt);
}

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC:       state %x conn %d dead %d zapped %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED));

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		spin_lock_bh(&xprt->transport_lock);
		if (!xprt_test_and_set_connected(xprt)) {
			struct sock_xprt *transport = container_of(xprt,
					struct sock_xprt, xprt);

			/* Reset TCP record info */
			transport->tcp_offset = 0;
			transport->tcp_reclen = 0;
			transport->tcp_copied = 0;
			transport->tcp_flags =
				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;

			xprt_wake_pending_tasks(xprt, -EAGAIN);
		}
		spin_unlock_bh(&xprt->transport_lock);
		break;
	case TCP_FIN_WAIT1:
		/* The client initiated a shutdown of the socket */
		xprt->connect_cookie++;
		xprt->reestablish_timeout = 0;
		set_bit(XPRT_CLOSING, &xprt->state);
		smp_mb__before_clear_bit();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
		smp_mb__after_clear_bit();
		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
		break;
	case TCP_CLOSE_WAIT:
		/* The server initiated a shutdown of the socket */
		xprt_force_disconnect(xprt);
	case TCP_SYN_SENT:
		xprt->connect_cookie++;
	case TCP_CLOSING:
		/*
		 * If the server closed down the connection, make sure that
		 * we back off before reconnecting
		 */
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
		break;
	case TCP_LAST_ACK:
		set_bit(XPRT_CLOSING, &xprt->state);
		xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
		smp_mb__before_clear_bit();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		smp_mb__after_clear_bit();
		break;
	case TCP_CLOSE:
		xs_tcp_cancel_linger_timeout(xprt);
		xs_sock_mark_closed(xprt);
	}
out:
	read_unlock(&sk->sk_callback_lock);
}
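/*
 * Summary of the state mapping above (editorial): TCP_ESTABLISHED
 * wakes pending tasks; TCP_FIN_WAIT1 and TCP_LAST_ACK mark the
 * transport closing and arm the linger timeout; TCP_CLOSE_WAIT forces
 * a disconnect; TCP_CLOSE cancels the linger timeout and marks the
 * socket closed, waking all pending tasks.
 */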
  1226. /**
  1227. * xs_error_report - callback mainly for catching socket errors
  1228. * @sk: socket
  1229. */
  1230. static void xs_error_report(struct sock *sk)
  1231. {
  1232. struct rpc_xprt *xprt;
  1233. read_lock(&sk->sk_callback_lock);
  1234. if (!(xprt = xprt_from_sock(sk)))
  1235. goto out;
  1236. dprintk("RPC: %s client %p...\n"
  1237. "RPC: error %d\n",
  1238. __func__, xprt, sk->sk_err);
  1239. xprt_wake_pending_tasks(xprt, -EAGAIN);
  1240. out:
  1241. read_unlock(&sk->sk_callback_lock);
  1242. }
  1243. static void xs_write_space(struct sock *sk)
  1244. {
  1245. struct socket *sock;
  1246. struct rpc_xprt *xprt;
  1247. if (unlikely(!(sock = sk->sk_socket)))
  1248. return;
  1249. clear_bit(SOCK_NOSPACE, &sock->flags);
  1250. if (unlikely(!(xprt = xprt_from_sock(sk))))
  1251. return;
  1252. if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
  1253. return;
  1254. xprt_write_space(xprt);
  1255. }
  1256. /**
  1257. * xs_udp_write_space - callback invoked when socket buffer space
  1258. * becomes available
  1259. * @sk: socket whose state has changed
  1260. *
  1261. * Called when more output buffer space is available for this socket.
  1262. * We try not to wake our writers until they can make "significant"
  1263. * progress, otherwise we'll waste resources thrashing kernel_sendmsg
  1264. * with a bunch of small requests.
  1265. */
  1266. static void xs_udp_write_space(struct sock *sk)
  1267. {
  1268. read_lock(&sk->sk_callback_lock);
  1269. /* from net/core/sock.c:sock_def_write_space */
  1270. if (sock_writeable(sk))
  1271. xs_write_space(sk);
  1272. read_unlock(&sk->sk_callback_lock);
  1273. }
  1274. /**
  1275. * xs_tcp_write_space - callback invoked when socket buffer space
  1276. * becomes available
  1277. * @sk: socket whose state has changed
  1278. *
  1279. * Called when more output buffer space is available for this socket.
  1280. * We try not to wake our writers until they can make "significant"
  1281. * progress, otherwise we'll waste resources thrashing kernel_sendmsg
  1282. * with a bunch of small requests.
  1283. */
  1284. static void xs_tcp_write_space(struct sock *sk)
  1285. {
  1286. read_lock(&sk->sk_callback_lock);
  1287. /* from net/core/stream.c:sk_stream_write_space */
  1288. if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
  1289. xs_write_space(sk);
  1290. read_unlock(&sk->sk_callback_lock);
  1291. }
  1292. static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
  1293. {
  1294. struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
  1295. struct sock *sk = transport->inet;
  1296. if (transport->rcvsize) {
  1297. sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
  1298. sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
  1299. }
  1300. if (transport->sndsize) {
  1301. sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
  1302. sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
  1303. sk->sk_write_space(sk);
  1304. }
  1305. }
/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 * @sndsize: requested size of send buffer, in bytes
 * @rcvsize: requested size of receive buffer, in bytes
 *
 * Set socket send and receive buffer size limits.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	transport->sndsize = 0;
	if (sndsize)
		transport->sndsize = sndsize + 1024;
	transport->rcvsize = 0;
	if (rcvsize)
		transport->rcvsize = rcvsize + 1024;

	xs_udp_do_set_buffer_size(xprt);
}
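
/*
 * The extra 1024 bytes added to each requested size are slack,
 * presumably so callers can size buffers for payload alone while the
 * RPC and transport headers still fit.
 */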
/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_task *task)
{
	xprt_adjust_cwnd(task, -ETIMEDOUT);
}
static unsigned short xs_get_random_port(void)
{
	/* +1 makes the range inclusive of xprt_max_resvport, and keeps
	 * the modulus non-zero when min == max. */
	unsigned short range = xprt_max_resvport - xprt_min_resvport + 1;
	unsigned short rand = (unsigned short) net_random() % range;
	return rand + xprt_min_resvport;
}
/**
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
 *
 */
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
	dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);

	rpc_set_port(xs_addr(xprt), port);
	xs_update_peer_port(xprt);
}
static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket *sock)
{
	unsigned short port = transport->srcport;

	if (port == 0 && transport->xprt.resvport)
		port = xs_get_random_port();
	return port;
}

static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket *sock, unsigned short port)
{
	if (transport->srcport != 0)
		transport->srcport = 0;
	if (!transport->xprt.resvport)
		return 0;
	if (port <= xprt_min_resvport || port > xprt_max_resvport)
		return xprt_max_resvport;
	return --port;
}
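
/*
 * The bind loops below try the cached source port first, then walk
 * downward through the reserved range, wrapping from the bottom back to
 * xprt_max_resvport.  nloop counts those wraps, so the loop gives up
 * with -EADDRINUSE only after every port in the range has been tried.
 */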
static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
{
	struct sockaddr_in myaddr = {
		.sin_family = AF_INET,
	};
	struct sockaddr_in *sa;
	int err, nloop = 0;
	unsigned short port = xs_get_srcport(transport, sock);
	unsigned short last;

	sa = (struct sockaddr_in *)&transport->srcaddr;
	myaddr.sin_addr = sa->sin_addr;
	do {
		myaddr.sin_port = htons(port);
		err = kernel_bind(sock, (struct sockaddr *) &myaddr,
						sizeof(myaddr));
		if (port == 0)
			break;
		if (err == 0) {
			transport->srcport = port;
			break;
		}
		last = port;
		port = xs_next_srcport(transport, sock, port);
		if (port > last)
			nloop++;
	} while (err == -EADDRINUSE && nloop != 2);
	dprintk("RPC: %s %pI4:%u: %s (%d)\n",
			__func__, &myaddr.sin_addr,
			port, err ? "failed" : "ok", err);
	return err;
}
static int xs_bind6(struct sock_xprt *transport, struct socket *sock)
{
	struct sockaddr_in6 myaddr = {
		.sin6_family = AF_INET6,
	};
	struct sockaddr_in6 *sa;
	int err, nloop = 0;
	unsigned short port = xs_get_srcport(transport, sock);
	unsigned short last;

	sa = (struct sockaddr_in6 *)&transport->srcaddr;
	myaddr.sin6_addr = sa->sin6_addr;
	do {
		myaddr.sin6_port = htons(port);
		err = kernel_bind(sock, (struct sockaddr *) &myaddr,
						sizeof(myaddr));
		if (port == 0)
			break;
		if (err == 0) {
			transport->srcport = port;
			break;
		}
		last = port;
		port = xs_next_srcport(transport, sock, port);
		if (port > last)
			nloop++;
	} while (err == -EADDRINUSE && nloop != 2);
	dprintk("RPC: xs_bind6 %pI6:%u: %s (%d)\n",
		&myaddr.sin6_addr, port, err ? "failed" : "ok", err);
	return err;
}
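
/*
 * Under lockdep, RPC-owned sockets get lock classes of their own so
 * that sk_lock taken from the RPC layer is not conflated with the same
 * lock taken on ordinary user sockets, which would otherwise trigger
 * false-positive deadlock reports.
 */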
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key xs_key[2];
static struct lock_class_key xs_slock_key[2];

static inline void xs_reclassify_socket4(struct socket *sock)
{
	struct sock *sk = sock->sk;

	BUG_ON(sock_owned_by_user(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
		&xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
}

static inline void xs_reclassify_socket6(struct socket *sock)
{
	struct sock *sk = sock->sk;

	BUG_ON(sock_owned_by_user(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
		&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
}
#else
static inline void xs_reclassify_socket4(struct socket *sock)
{
}

static inline void xs_reclassify_socket6(struct socket *sock)
{
}
#endif
static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	if (!transport->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		xs_save_old_callbacks(transport, sk);

		sk->sk_user_data = xprt;
		sk->sk_data_ready = xs_udp_data_ready;
		sk->sk_write_space = xs_udp_write_space;
		sk->sk_error_report = xs_error_report;
		sk->sk_no_check = UDP_CSUM_NORCV;
		sk->sk_allocation = GFP_ATOMIC;

		xprt_set_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}
	xs_udp_do_set_buffer_size(xprt);
}
/**
 * xs_udp_connect_worker4 - set up a UDP socket
 * @work: work structure embedded in the RPC transport to connect
 *
 * Invoked from a work queue.
 */
static void xs_udp_connect_worker4(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct rpc_xprt *xprt = &transport->xprt;
	struct socket *sock = transport->sock;
	int err, status = -EIO;

	if (xprt->shutdown)
		goto out;

	/* Start by resetting any existing state */
	xs_reset_transport(transport);

	err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
	if (err < 0) {
		dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
		goto out;
	}
	xs_reclassify_socket4(sock);

	if (xs_bind4(transport, sock) < 0) {
		sock_release(sock);
		goto out;
	}
	dprintk("RPC: worker connecting xprt %p via %s to "
				"%s (port %s)\n", xprt,
			xprt->address_strings[RPC_DISPLAY_PROTO],
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_udp_finish_connecting(xprt, sock);
	status = 0;
out:
	xprt_clear_connecting(xprt);
	xprt_wake_pending_tasks(xprt, status);
}
/**
 * xs_udp_connect_worker6 - set up a UDP socket
 * @work: work structure embedded in the RPC transport to connect
 *
 * Invoked from a work queue.
 */
static void xs_udp_connect_worker6(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct rpc_xprt *xprt = &transport->xprt;
	struct socket *sock = transport->sock;
	int err, status = -EIO;

	if (xprt->shutdown)
		goto out;

	/* Start by resetting any existing state */
	xs_reset_transport(transport);

	err = sock_create_kern(PF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
	if (err < 0) {
		dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
		goto out;
	}
	xs_reclassify_socket6(sock);

	if (xs_bind6(transport, sock) < 0) {
		sock_release(sock);
		goto out;
	}
	dprintk("RPC: worker connecting xprt %p via %s to "
				"%s (port %s)\n", xprt,
			xprt->address_strings[RPC_DISPLAY_PROTO],
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_udp_finish_connecting(xprt, sock);
	status = 0;
out:
	xprt_clear_connecting(xprt);
	xprt_wake_pending_tasks(xprt, status);
}
/*
 * We need to preserve the port number so the reply cache on the server can
 * find our cached RPC replies when we get around to reconnecting.
 */
static void xs_abort_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
{
	int result;
	struct sockaddr any;

	dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt);

	/*
	 * Disconnect the transport socket by doing a connect operation
	 * with AF_UNSPEC.  This should return immediately...
	 */
	memset(&any, 0, sizeof(any));
	any.sa_family = AF_UNSPEC;
	result = kernel_connect(transport->sock, &any, sizeof(any), 0);
	if (!result)
		xs_sock_mark_closed(xprt);
	else
		dprintk("RPC: AF_UNSPEC connect return code %d\n",
				result);
}
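
/*
 * Decide whether the socket needs the AF_UNSPEC reset above: one that is
 * already fully closed needs nothing, and one that is established or
 * still connecting is left alone; only the intermediate half-closed
 * states are aborted so the source port can be rebound.
 */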
static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
{
	unsigned int state = transport->inet->sk_state;

	if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED)
		return;
	if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT))
		return;
	xs_abort_connection(xprt, transport);
}
static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	if (!transport->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		xs_save_old_callbacks(transport, sk);

		sk->sk_user_data = xprt;
		sk->sk_data_ready = xs_tcp_data_ready;
		sk->sk_state_change = xs_tcp_state_change;
		sk->sk_write_space = xs_tcp_write_space;
		sk->sk_error_report = xs_error_report;
		sk->sk_allocation = GFP_ATOMIC;

		/* socket options */
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
		sock_reset_flag(sk, SOCK_LINGER);
		tcp_sk(sk)->linger2 = 0;
		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;

		xprt_clear_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}

	if (!xprt_bound(xprt))
		return -ENOTCONN;

	/* Tell the socket layer to start connecting... */
	xprt->stat.connect_count++;
	xprt->stat.connect_start = jiffies;
	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
}
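
/*
 * Note that the non-blocking kernel_connect() above normally returns
 * -EINPROGRESS; the handshake completes asynchronously and the result
 * is delivered through the sk_state_change callback installed earlier.
 */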
/**
 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
 * @xprt: RPC transport to connect
 * @transport: socket transport to connect
 * @create_sock: function to create a socket of the correct type
 *
 * Invoked from a work queue.
 */
static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
		struct sock_xprt *transport,
		struct socket *(*create_sock)(struct rpc_xprt *,
			struct sock_xprt *))
{
	struct socket *sock = transport->sock;
	int status = -EIO;

	if (xprt->shutdown)
		goto out;

	if (!sock) {
		clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
		sock = create_sock(xprt, transport);
		if (IS_ERR(sock)) {
			status = PTR_ERR(sock);
			goto out;
		}
	} else {
		int abort_and_exit;

		abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
				&xprt->state);
		/* "close" the socket, preserving the local port */
		xs_tcp_reuse_connection(xprt, transport);

		if (abort_and_exit)
			goto out_eagain;
	}

	dprintk("RPC: worker connecting xprt %p via %s to "
				"%s (port %s)\n", xprt,
			xprt->address_strings[RPC_DISPLAY_PROTO],
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT]);

	status = xs_tcp_finish_connecting(xprt, sock);
	dprintk("RPC: %p connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt),
			sock->sk->sk_state);
	switch (status) {
	default:
		printk("%s: connect returned unhandled error %d\n",
			__func__, status);
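		/* fall through: treat unknown errors like -EADDRNOTAVAIL */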
	case -EADDRNOTAVAIL:
		/* We're probably in TIME_WAIT. Get rid of existing socket,
		 * and retry
		 */
		set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
		xprt_force_disconnect(xprt);
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENETUNREACH:
		/* retry with existing socket, after a delay */
	case 0:
	case -EINPROGRESS:
	case -EALREADY:
		xprt_clear_connecting(xprt);
		return;
	}
out_eagain:
	status = -EAGAIN;
out:
	xprt_clear_connecting(xprt);
	xprt_wake_pending_tasks(xprt, status);
}
static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt,
		struct sock_xprt *transport)
{
	struct socket *sock;
	int err;

	/* start from scratch */
	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		dprintk("RPC: can't create TCP transport socket (%d).\n",
				-err);
		goto out_err;
	}
	xs_reclassify_socket4(sock);

	if (xs_bind4(transport, sock) < 0) {
		sock_release(sock);
		goto out_err;
	}
	return sock;
out_err:
	return ERR_PTR(-EIO);
}
/**
 * xs_tcp_connect_worker4 - connect a TCP socket to a remote endpoint
 * @work: work structure embedded in the RPC transport to connect
 *
 * Invoked from a work queue.
 */
static void xs_tcp_connect_worker4(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct rpc_xprt *xprt = &transport->xprt;

	xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock4);
}
static struct socket *xs_create_tcp_sock6(struct rpc_xprt *xprt,
		struct sock_xprt *transport)
{
	struct socket *sock;
	int err;

	/* start from scratch */
	err = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		dprintk("RPC: can't create TCP transport socket (%d).\n",
				-err);
		goto out_err;
	}
	xs_reclassify_socket6(sock);

	if (xs_bind6(transport, sock) < 0) {
		sock_release(sock);
		goto out_err;
	}
	return sock;
out_err:
	return ERR_PTR(-EIO);
}
/**
 * xs_tcp_connect_worker6 - connect a TCP socket to a remote endpoint
 * @work: work structure embedded in the RPC transport to connect
 *
 * Invoked from a work queue.
 */
static void xs_tcp_connect_worker6(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct rpc_xprt *xprt = &transport->xprt;

	xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock6);
}
/**
 * xs_connect - connect a socket to a remote endpoint
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 *
 * UDP socket connects are synchronous, but we use a work queue anyway
 * to guarantee that even unprivileged user processes can set up a
 * socket on a privileged port.
 *
 * If a UDP socket connect fails, the delay behavior here prevents
 * retry floods (hard mounts).
 */
static void xs_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	if (xprt_test_and_set_connecting(xprt))
		return;

	if (transport->sock != NULL) {
		dprintk("RPC: xs_connect delayed xprt %p for %lu "
				"seconds\n",
				xprt, xprt->reestablish_timeout / HZ);
		queue_delayed_work(rpciod_workqueue,
				   &transport->connect_worker,
				   xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
	} else {
		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
		queue_delayed_work(rpciod_workqueue,
				&transport->connect_worker, 0);
	}
}
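
/*
 * The reestablish timeout above doubles after each reconnect attempt
 * and is clamped to [XS_TCP_INIT_REEST_TO, XS_TCP_MAX_REEST_TO]:
 * classic exponential backoff against a peer that keeps dropping us.
 */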
static void xs_tcp_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	/* Exit if we need to wait for socket shutdown to complete */
	if (test_bit(XPRT_CLOSING, &xprt->state))
		return;
	xs_connect(task);
}
/**
 * xs_udp_print_stats - display UDP socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
 * @seq: output file
 *
 */
static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
			transport->srcport,
			xprt->stat.bind_count,
			xprt->stat.sends,
			xprt->stat.recvs,
			xprt->stat.bad_xids,
			xprt->stat.req_u,
			xprt->stat.bklog_u);
}
/**
 * xs_tcp_print_stats - display TCP socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
 * @seq: output file
 *
 */
static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
			transport->srcport,
			xprt->stat.bind_count,
			xprt->stat.connect_count,
			xprt->stat.connect_time,
			idle_time,
			xprt->stat.sends,
			xprt->stat.recvs,
			xprt->stat.bad_xids,
			xprt->stat.req_u,
			xprt->stat.bklog_u);
}
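
/*
 * Both "xprt:" lines above surface in per-mount RPC statistics (for
 * example /proc/self/mountstats), so the field order is effectively a
 * user-visible ABI and should not be reordered.
 */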
/*
 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
 * we allocate pages instead of doing a kmalloc like rpc_malloc is because we
 * want to use the server side send routines.
 */
void *bc_malloc(struct rpc_task *task, size_t size)
{
	struct page *page;
	struct rpc_buffer *buf;

	BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
	page = alloc_page(GFP_KERNEL);

	if (!page)
		return NULL;

	buf = page_address(page);
	buf->len = PAGE_SIZE;

	return buf->data;
}
/*
 * Free the space allocated in the bc_malloc routine
 */
void bc_free(void *buffer)
{
	struct rpc_buffer *buf;

	if (!buffer)
		return;

	buf = container_of(buffer, struct rpc_buffer, data);
	free_page((unsigned long)buf);
}
/*
 * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
 */
static int bc_sendto(struct rpc_rqst *req)
{
	int len;
	struct xdr_buf *xbufp = &req->rq_snd_buf;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct socket *sock = transport->sock;
	unsigned long headoff;
	unsigned long tailoff;

	/*
	 * Set up the rpc header and record marker stuff
	 */
	xs_encode_tcp_record_marker(xbufp);

	tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
	headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
	len = svc_send_common(sock, xbufp,
			      virt_to_page(xbufp->head[0].iov_base), headoff,
			      xbufp->tail[0].iov_base, tailoff);

	if (len != xbufp->len) {
		printk(KERN_NOTICE "Error sending entire callback!\n");
		len = -EAGAIN;
	}

	return len;
}
/*
 * The send routine. Borrows from svc_send
 */
static int bc_send_request(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct svc_xprt *xprt;
	struct svc_sock *svsk;
	u32 len;

	dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
	/*
	 * Get the server socket associated with this callback xprt
	 */
	xprt = req->rq_xprt->bc_xprt;
	svsk = container_of(xprt, struct svc_sock, sk_xprt);

	/*
	 * Grab the mutex to serialize data as the connection is shared
	 * with the fore channel
	 */
	if (!mutex_trylock(&xprt->xpt_mutex)) {
		rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
		if (!mutex_trylock(&xprt->xpt_mutex))
			return -EAGAIN;
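		/*
		 * rpc_sleep_on() queued us on xpt_bc_pending, but we got
		 * the mutex after all, so wake ourselves back out of the
		 * wait queue before proceeding.
		 */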
		rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
	}
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = bc_sendto(req);
	mutex_unlock(&xprt->xpt_mutex);

	if (len > 0)
		len = 0;

	return len;
}
/*
 * The close routine. Since this is client initiated, we do nothing
 */
static void bc_close(struct rpc_xprt *xprt)
{
	return;
}

/*
 * The xprt destroy routine. Again, because this connection is client
 * initiated, we do nothing
 */
static void bc_destroy(struct rpc_xprt *xprt)
{
	return;
}
static struct rpc_xprt_ops xs_udp_ops = {
	.set_buffer_size	= xs_udp_set_buffer_size,
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_udp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
	.timer			= xs_udp_timer,
	.release_request	= xprt_release_rqst_cong,
	.close			= xs_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_udp_print_stats,
};

static struct rpc_xprt_ops xs_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xs_tcp_release_xprt,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_tcp_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_tcp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
#if defined(CONFIG_NFS_V4_1)
	.release_request	= bc_release_request,
#endif /* CONFIG_NFS_V4_1 */
	.close			= xs_tcp_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_tcp_print_stats,
};

/*
 * The rpc_xprt_ops for the server backchannel
 */
static struct rpc_xprt_ops bc_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.buf_alloc		= bc_malloc,
	.buf_free		= bc_free,
	.send_request		= bc_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= bc_close,
	.destroy		= bc_destroy,
	.print_stats		= xs_tcp_print_stats,
};
static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
				      unsigned int slot_table_size)
{
	struct rpc_xprt *xprt;
	struct sock_xprt *new;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: xs_setup_xprt: address too large\n");
		return ERR_PTR(-EBADF);
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (new == NULL) {
		dprintk("RPC: xs_setup_xprt: couldn't allocate "
				"rpc_xprt\n");
		return ERR_PTR(-ENOMEM);
	}
	xprt = &new->xprt;

	xprt->max_reqs = slot_table_size;
	xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL);
	if (xprt->slot == NULL) {
		kfree(xprt);
		dprintk("RPC: xs_setup_xprt: couldn't allocate slot "
				"table\n");
		return ERR_PTR(-ENOMEM);
	}

	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
	xprt->addrlen = args->addrlen;
	if (args->srcaddr)
		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);

	return xprt;
}
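
/*
 * Note for the setup routines below: once xs_setup_xprt() succeeds,
 * every error path must free both xprt->slot and the xprt itself,
 * since no destructor has been installed at that point.
 */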
static const struct rpc_timeout xs_udp_default_timeout = {
	.to_initval = 5 * HZ,
	.to_maxval = 30 * HZ,
	.to_increment = 5 * HZ,
	.to_retries = 5,
};
/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;

	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_UDP;
	xprt->tsh_size = 0;
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	xprt->bind_timeout = XS_BIND_TO;
	xprt->connect_timeout = XS_UDP_CONN_TO;
	xprt->reestablish_timeout = XS_UDP_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_udp_ops;
	xprt->timeout = &xs_udp_default_timeout;

	switch (addr->sa_family) {
	case AF_INET:
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_udp_connect_worker4);
		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
		break;
	case AF_INET6:
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_udp_connect_worker6);
		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
		break;
	default:
		kfree(xprt->slot);
		kfree(xprt);
		return ERR_PTR(-EAFNOSUPPORT);
	}

	if (xprt_bound(xprt))
		dprintk("RPC: set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC: set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	if (try_module_get(THIS_MODULE))
		return xprt;

	kfree(xprt->slot);
	kfree(xprt);
	return ERR_PTR(-EINVAL);
}
static const struct rpc_timeout xs_tcp_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
	.to_retries = 2,
};
/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	xprt->bind_timeout = XS_BIND_TO;
	xprt->connect_timeout = XS_TCP_CONN_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_tcp_ops;
	xprt->timeout = &xs_tcp_default_timeout;

	switch (addr->sa_family) {
	case AF_INET:
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_tcp_connect_worker4);
		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
					xs_tcp_connect_worker6);
		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
		break;
	default:
		kfree(xprt->slot);
		kfree(xprt);
		return ERR_PTR(-EAFNOSUPPORT);
	}

	if (xprt_bound(xprt))
		dprintk("RPC: set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC: set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	if (try_module_get(THIS_MODULE))
		return xprt;

	kfree(xprt->slot);
	kfree(xprt);
	return ERR_PTR(-EINVAL);
}
/**
 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct svc_sock *bc_sock;

	if (!args->bc_xprt)
		return ERR_PTR(-EINVAL);

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
	xprt->timeout = &xs_tcp_default_timeout;

	/* backchannel */
	xprt_set_bound(xprt);
	xprt->bind_timeout = 0;
	xprt->connect_timeout = 0;
	xprt->reestablish_timeout = 0;
	xprt->idle_timeout = 0;

	/*
	 * The backchannel uses the same socket connection as the
	 * forechannel
	 */
	xprt->bc_xprt = args->bc_xprt;
	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
	bc_sock->sk_bc_xprt = xprt;
	transport->sock = bc_sock->sk_sock;
	transport->inet = bc_sock->sk_sk;

	xprt->ops = &bc_tcp_ops;

	switch (addr->sa_family) {
	case AF_INET:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP6);
		break;
	default:
		kfree(xprt->slot);
		kfree(xprt);
		return ERR_PTR(-EAFNOSUPPORT);
	}

	if (xprt_bound(xprt))
		dprintk("RPC: set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC: set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	/*
	 * Since we don't want connections for the backchannel, we set
	 * the xprt status to connected
	 */
	xprt_set_connected(xprt);

	if (try_module_get(THIS_MODULE))
		return xprt;

	kfree(xprt->slot);
	kfree(xprt);
	return ERR_PTR(-EINVAL);
}
static struct xprt_class xs_udp_transport = {
	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
	.name		= "udp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_UDP,
	.setup		= xs_setup_udp,
};

static struct xprt_class xs_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
	.name		= "tcp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_TCP,
	.setup		= xs_setup_tcp,
};

static struct xprt_class xs_bc_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
	.name		= "tcp NFSv4.1 backchannel",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_BC_TCP,
	.setup		= xs_setup_bc_tcp,
};
/**
 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
 *
 */
int init_socket_xprt(void)
{
#ifdef RPC_DEBUG
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif

	xprt_register_transport(&xs_udp_transport);
	xprt_register_transport(&xs_tcp_transport);
	xprt_register_transport(&xs_bc_tcp_transport);

	return 0;
}
/**
 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
 *
 */
void cleanup_socket_xprt(void)
{
#ifdef RPC_DEBUG
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif

	xprt_unregister_transport(&xs_udp_transport);
	xprt_unregister_transport(&xs_tcp_transport);
	xprt_unregister_transport(&xs_bc_tcp_transport);
}
static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
		unsigned int min, unsigned int max)
{
	unsigned long num;
	int ret;

	if (!val)
		return -EINVAL;
	ret = strict_strtoul(val, 0, &num);
	if (ret == -EINVAL || num < min || num > max)
		return -EINVAL;
	*((unsigned int *)kp->arg) = num;
	return 0;
}
static int param_set_portnr(const char *val, struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_RESVPORT,
			RPC_MAX_RESVPORT);
}

static int param_get_portnr(char *buffer, struct kernel_param *kp)
{
	return param_get_uint(buffer, kp);
}

#define param_check_portnr(name, p) \
	__param_check(name, p, unsigned int);

module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
static int param_set_slot_table_size(const char *val, struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_SLOT_TABLE,
			RPC_MAX_SLOT_TABLE);
}

static int param_get_slot_table_size(char *buffer, struct kernel_param *kp)
{
	return param_get_uint(buffer, kp);
}

#define param_check_slot_table_size(name, p) \
	__param_check(name, p, unsigned int);

module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
		   slot_table_size, 0644);
module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
		   slot_table_size, 0644);
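
/*
 * The module parameters above appear under
 * /sys/module/sunrpc/parameters/ and are range-checked through
 * param_set_uint_minmax(), so out-of-range writes are rejected with
 * -EINVAL rather than silently clamped.
 */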