/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken;
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);
/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}
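
/*
 * Worked example (illustrative comment, not part of the original file):
 * with an initial timeout of 1s and rto_max = 120s, the retransmission
 * intervals double as 1, 2, 4, 8, ... seconds, giving cumulative
 * periods of 1, 3, 7, 15, ... For seconds = 15 the loop stops once
 * period reaches 15 and returns res = 4, i.e. four retransmits fit in
 * a 15-second budget. retrans_to_secs() below is the inverse mapping.
 */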
/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	sock_poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making poll() on write()
	 * impossible in state CLOSE_WAIT. One solution is evident --- to
	 * set POLLHUP if and only if shutdown has been made in both
	 * directions. Actually, it is interesting to look at how Solaris
	 * and DUX solve this dilemma. I would prefer, if POLLHUP were
	 * maskable, then we could set it on SND_SHUTDOWN. BTW examples
	 * given in Stevens' books assume exactly this behaviour, it
	 * explains why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target--;

		/* Potential race condition. If the read of tp below is
		 * reordered above the read of sk->sk_state, we can be
		 * illegally awakened in SYN_* states. */
		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			struct sk_buff *skb;

			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			skb = skb_peek_tail(&sk->sk_receive_queue);
			if (answ && skb)
				answ -= tcp_hdr(skb)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
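
/*
 * Usage sketch (comment added for exposition, not in the original
 * file): the three commands above are reachable from user space as
 * SIOCINQ (an alias of FIONREAD), SIOCATMARK and SIOCOUTQ (an alias
 * of TIOCOUTQ), e.g.:
 *
 *	int queued;
 *	if (ioctl(fd, SIOCINQ, &queued) == 0)
 *		printf("unread bytes: %d\n", queued);
 *	if (ioctl(fd, SIOCOUTQ, &queued) == 0)
 *		printf("unacked bytes: %d\n", queued);
 */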
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
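
/*
 * Note added for exposition (not in the original file): forced_push()
 * fires once more than half of the largest window the peer has ever
 * advertised has been written since the last PSH mark. E.g. with
 * max_window = 64 KB, a PSH is forced roughly every 32 KB of new data
 * even when the sender keeps the pipe continuously full.
 */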
static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->flags = TCPCB_FLAG_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_send_head(sk)) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);

		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
			      tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}
/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when the user tries to read
				 * from a never-connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
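
/*
 * Usage sketch (comment added for exposition, not in the original
 * file): this is the socket-side backend of splice(2). A user-space
 * zero-copy relay from a TCP socket to another fd might look like:
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(sockfd, NULL, pfd[1], NULL, 16384,
 *			   SPLICE_F_MOVE | SPLICE_F_MORE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, outfd, NULL, n, SPLICE_F_MOVE);
 */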
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned. */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb_reserve(skb, skb_tailroom(skb) - size);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}
static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 xmit_size_goal, old_size_goal;

	xmit_size_goal = mss_now;

	if (large_allowed && sk_can_gso(sk)) {
		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);

		/* We try hard to avoid divides here */
		old_size_goal = tp->xmit_size_goal_segs * mss_now;

		if (likely(old_size_goal <= xmit_size_goal &&
			   old_size_goal + mss_now > xmit_size_goal)) {
			xmit_size_goal = old_size_goal;
		} else {
			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
		}
	}

	return max(xmit_size_goal, mss_now);
}
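
/*
 * Worked example (illustrative comment with assumed numbers, not in
 * the original file): take mss_now = 1448, sk_gso_max_size = 65536,
 * a 20-byte IPv4 header and a 32-byte TCP header (timestamps on),
 * and a window large enough that tcp_bound_to_half_wnd() does not
 * clamp. The raw goal is 65535 - 20 - 32 = 65483 bytes; rounding
 * down to whole segments gives xmit_size_goal_segs = 45 and a goal
 * of 45 * 1448 = 65160 bytes per GSO super-packet. The cached
 * old_size_goal is reused while it stays within one MSS of the raw
 * goal, which is what keeps the divide off the fast path.
 */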
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}
ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}
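
/*
 * Usage sketch (comment added for exposition, not in the original
 * file): tcp_sendpage() is the path taken by sendfile(2) on a TCP
 * socket when the route's device supports scatter-gather and
 * checksum offload; otherwise sock_no_sendpage() falls back to an
 * ordinary copying send. From user space:
 *
 *	off_t off = 0;
 *	ssize_t sent = sendfile(sockfd, filefd, &off, file_size);
 */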
#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}
int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t size)
{
	struct sock *sk = sock->sk;
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy = 0;
			int max = size_goal;

			skb = tcp_write_queue_tail(sk);
			if (tcp_send_head(sk)) {
				if (skb->ip_summed == CHECKSUM_NONE)
					max = mss_now;
				copy = max - skb->len;
			}

			if (copy <= 0) {
new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk, select_size(sk),
						sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
				max = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					   !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < max || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_send_mss(sk, &size_goal, flags);
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}
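
/*
 * Note added for exposition (not in the original file): the loop
 * above first copies into tail room of the last queued skb, then
 * into the per-socket staging page (TCP_PAGE/TCP_OFF), and starts a
 * new segment only when the current one reaches size_goal. From user
 * space this is the ordinary send(2)/write(2) path, e.g.:
 *
 *	ssize_t n = send(sockfd, buf, len, MSG_MORE);
 *
 * where MSG_MORE keeps the final partial segment corked, as handled
 * by the TCP_NAGLE_CORK case in tcp_push() above.
 */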
/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */
static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
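
/*
 * Usage sketch (comment added for exposition, not in the original
 * file): the single out-of-band byte is fetched with MSG_OOB:
 *
 *	char c;
 *	if (recv(sockfd, &c, 1, MSG_OOB) == 1)
 *		handle_urgent_byte(c);	// hypothetical handler
 *
 * With SO_OOBINLINE set on the socket, recv(MSG_OOB) instead fails
 * with EINVAL (the "Yes this is right !" branch above) and the byte
 * is read from the normal stream; ioctl(SIOCATMARK) tells the reader
 * when it has reached the urgent mark.
 */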
/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. new_window is the window we would
			 * advertise now; we may do so as long as it is not
			 * smaller than the current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
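
/*
 * Worked example (illustrative comment with assumed numbers, not in
 * the original file): with window_clamp = 64 KB and a current receive
 * window of 8 KB, the cheap test 2 * 8 KB <= 64 KB passes, so
 * __tcp_select_window() is consulted; if the read freed enough space
 * that it now returns 32 KB (>= 2 * 8 KB), a window-update ACK is
 * sent immediately instead of waiting for the delayed-ACK timer.
 */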
static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}
/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			/*
			 * If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq-1, &offset);
			if (!skb || (offset+1 != skb->len))
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);
	return copied;
}
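
/*
 * Note added for exposition (not in the original file): recv_actor
 * consumes bytes in place and returns how many it used.
 * tcp_splice_data_recv(), earlier in this file, is the in-tree
 * example of such an actor: it feeds each skb region into a pipe via
 * skb_splice_bits() and decrements desc->count as it goes.
 */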
  1179. /*
  1180. * This routine copies from a sock struct into the user buffer.
  1181. *
  1182. * Technical note: in 2.3 we work on _locked_ socket, so that
  1183. * tricks with *seq access order and skb->users are not required.
  1184. * Probably, code can be easily improved even more.
  1185. */
  1186. int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
  1187. size_t len, int nonblock, int flags, int *addr_len)
  1188. {
  1189. struct tcp_sock *tp = tcp_sk(sk);
  1190. int copied = 0;
  1191. u32 peek_seq;
  1192. u32 *seq;
  1193. unsigned long used;
  1194. int err;
  1195. int target; /* Read at least this many bytes */
  1196. long timeo;
  1197. struct task_struct *user_recv = NULL;
  1198. int copied_early = 0;
  1199. struct sk_buff *skb;
  1200. u32 urg_hole = 0;
  1201. lock_sock(sk);
  1202. TCP_CHECK_TIMER(sk);
  1203. err = -ENOTCONN;
  1204. if (sk->sk_state == TCP_LISTEN)
  1205. goto out;
  1206. timeo = sock_rcvtimeo(sk, nonblock);
  1207. /* Urgent data needs to be handled specially. */
  1208. if (flags & MSG_OOB)
  1209. goto recv_urg;
  1210. seq = &tp->copied_seq;
  1211. if (flags & MSG_PEEK) {
  1212. peek_seq = tp->copied_seq;
  1213. seq = &peek_seq;
  1214. }
  1215. target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
  1216. #ifdef CONFIG_NET_DMA
  1217. tp->ucopy.dma_chan = NULL;
  1218. preempt_disable();
  1219. skb = skb_peek_tail(&sk->sk_receive_queue);
  1220. {
  1221. int available = 0;
  1222. if (skb)
  1223. available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
  1224. if ((available < target) &&
  1225. (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
  1226. !sysctl_tcp_low_latency &&
  1227. dma_find_channel(DMA_MEMCPY)) {
  1228. preempt_enable_no_resched();
  1229. tp->ucopy.pinned_list =
  1230. dma_pin_iovec_pages(msg->msg_iov, len);
  1231. } else {
  1232. preempt_enable_no_resched();
  1233. }
  1234. }
  1235. #endif
  1236. do {
  1237. u32 offset;
  1238. /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
  1239. if (tp->urg_data && tp->urg_seq == *seq) {
  1240. if (copied)
  1241. break;
  1242. if (signal_pending(current)) {
  1243. copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
  1244. break;
  1245. }
  1246. }
  1247. /* Next get a buffer. */
  1248. skb_queue_walk(&sk->sk_receive_queue, skb) {
  1249. /* Now that we have two receive queues this
  1250. * shouldn't happen.
  1251. */
  1252. if (before(*seq, TCP_SKB_CB(skb)->seq)) {
  1253. printk(KERN_INFO "recvmsg bug: copied %X "
  1254. "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
  1255. break;
  1256. }
  1257. offset = *seq - TCP_SKB_CB(skb)->seq;
  1258. if (tcp_hdr(skb)->syn)
  1259. offset--;
  1260. if (offset < skb->len)
  1261. goto found_ok_skb;
  1262. if (tcp_hdr(skb)->fin)
  1263. goto found_fin_ok;
  1264. WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
  1265. "copied %X seq %X\n", *seq,
  1266. TCP_SKB_CB(skb)->seq);
  1267. }

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from a never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
				!(flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have an empty
			 * receive_queue, but the prequeue _can_ be non-empty
			 * after the 2nd iteration, when we jumped to the
			 * start of the loop because backlog processing added
			 * something to receive_queue. We cannot
			 * release_sock(), because the backlog contains
			 * packets that arrived _after_ the prequeued ones.
			 *
			 * In short, the algorithm is clear: process all the
			 * queues in order. We could do it more directly by
			 * requeueing packets from the backlog to the prequeue
			 * when it is not empty. That is more elegant, but
			 * eats cycles, unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) &&
		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, task_pid_nr(current));
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						urg_hole++;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {
					printk(KERN_ALERT "dma_cookie < 0\n");

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
				if ((offset + used) == skb->len)
					copied_early = 1;
			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
						msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

#ifdef CONFIG_NET_DMA
	if (tp->ucopy.dma_chan) {
		dma_cookie_t done, used;

		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
						 tp->ucopy.dma_cookie, &done,
						 &used) == DMA_IN_PROGRESS) {
			/* do partial cleanup of sk_async_wait_queue */
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}

		/* Safe to free early-copied skbs now */
		__skb_queue_purge(&sk->sk_async_wait_queue);
		tp->ucopy.dma_chan = NULL;
	}
	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, msg, len, flags);
	goto out;
}
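
/*
 * Editorial note (illustrative, not kernel code): because
 * sock_rcvlowat(sk, flags & MSG_WAITALL, len) returns len when
 * MSG_WAITALL is set, the loop above keeps going until the full request
 * is satisfied. A hypothetical userspace caller relying on that:
 *
 *	char buf[512];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_WAITALL);
 *
 * n is then sizeof(buf) unless EOF, a signal, or an error cut the read
 * short.
 */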

void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
		break;

	case TCP_CLOSE:
		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == TCP_ESTABLISHED)
			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
#endif
}
EXPORT_SYMBOL_GPL(tcp_set_state);

/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	closed.
 */

static const unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
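
/*
 * Editorial note: each new_state[] entry packs the next state in
 * TCP_STATE_MASK plus an optional TCP_ACTION_FIN flag, so for an
 * ESTABLISHED socket tcp_close_state() evaluates, in effect,
 *
 *	next = TCP_FIN_WAIT1 | TCP_ACTION_FIN;
 *
 * moving the socket to FIN_WAIT1 and returning non-zero, which tells
 * the caller to emit a FIN.
 */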

/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/* We need to grab some memory, and put together a FIN,
	 * and then put it into the queue to be sent.
	 *	Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets. FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}
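
/*
 * Editorial note (illustrative, not kernel code): the userspace
 * counterpart of the above is a half-close that sends our FIN while
 * leaving the receive side open:
 *
 *	shutdown(fd, SHUT_WR);
 *
 * which arrives here with SEND_SHUTDOWN set in how.
 */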

void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/* We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  tcp_hdr(skb)->fin;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_mem_reclaim(sk);

	/* As outlined in RFC 2525, section 2.17, we send a RST here because
	 * data was lost. To witness the awful effects of the old behavior of
	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
	 * GET in an FTP client, suspend the process, wait for the client to
	 * advertise a zero window, then kill -9 the FTP client, wheee...
	 * Note: timeout is always zero in such a case.
	 */
	if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken the TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. "RFC state" is ESTABLISHED,
		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible deviations are that we sometimes enter
		 * time-wait state when it is not really required (harmless),
		 * and do not send active resets when the specs require them
		 * (TCP_ESTABLISHED and TCP_CLOSE_WAIT, which to Linux look
		 * like CLOSING or LAST_ACK). Probably I missed some more
		 * holelets.
		 *						--ANK
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/* It is the last release_sock in its life. It will remove backlog. */
	release_sock(sk);

	/* Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	/*	This is a (useful) BSD violation of the RFC. There is a
	 *	problem with TCP as specified in that the other end could
	 *	keep a socket open forever with no application left this end.
	 *	We use a 3 minute timeout (about the same as BSD) then kill
	 *	our end. If they send after that then tough - BUT: long enough
	 *	that we won't make the old 4*rto = almost no time - whoops
	 *	reset mistake.
	 *
	 *	Nope, it was not a mistake. It is really desired behaviour,
	 *	e.g. on http servers, where such sockets are useless but
	 *	consume significant resources. Let's do it with a special
	 *	linger2 option.					--ANK
	 */
	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(sock_net(sk),
					LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		int orphan_count = percpu_counter_read_positive(
						sk->sk_prot->orphan_count);
		sk_mem_reclaim(sk);
		if (tcp_too_many_orphans(sk, orphan_count)) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many orphaned "
				       "sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(sock_net(sk),
					LINUX_MIB_TCPABORTONMEMORY);
		}
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
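
/*
 * Editorial note (illustrative, not kernel code): the zero-linger
 * branch above is what userspace triggers with
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(fd);
 *
 * which disconnects immediately, aborting with a RST instead of a FIN.
 */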

/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		 * states
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	tcp_write_queue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt = 0;
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	tp->snd_cwnd = 2;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_cnt = 0;
	tp->bytes_acked = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	tcp_init_send_head(sk);
	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
	__sk_dst_reset(sk);

	WARN_ON(inet->num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}
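
/*
 * Editorial note (illustrative, not kernel code): userspace reaches
 * tcp_disconnect() by connecting the socket to AF_UNSPEC:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));
 *
 * leaving the socket closed but reusable.
 */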

/*
 *	Socket option code for TCP.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int val;
	int err = 0;

	/* This is a string value; all the others are ints */
	if (optname == TCP_CONGESTION) {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min_t(long, TCP_CA_NAME_MAX-1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(sk, name);
		release_sock(sk);
		return err;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used */
		if (val < 8 || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on corked socket is remembered, but
			 * it is not activated until cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue. This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle&TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		}
		break;

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			icsk->icsk_syn_retries = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		/* Translate value in seconds to number of retransmits */
		icsk->icsk_accept_queue.rskq_defer_accept =
			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
					TCP_RTO_MAX / HZ);
		break;

	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
						SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			icsk->icsk_ack.pingpong = 1;
		} else {
			icsk->icsk_ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    inet_csk_ack_scheduled(sk)) {
				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
				tcp_cleanup_rbuf(sk, 1);
				if (!(val & 1))
					icsk->icsk_ack.pingpong = 1;
			}
		}
		break;

#ifdef CONFIG_TCP_MD5SIG
	case TCP_MD5SIG:
		/* Read the IP->Key mappings from userspace */
		err = tp->af_specific->md5_parse(sk, optval, optlen);
		break;
#endif

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
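
/*
 * Editorial note (illustrative, not kernel code; the fd and buffer
 * names are made up): the write()-then-sendfile() pattern the TCP_CORK
 * comment above describes looks like this from userspace:
 *
 *	int on = 1, off = 0;
 *	setsockopt(sock_fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(sock_fd, hdr, hdr_len);
 *	sendfile(sock_fd, file_fd, NULL, body_len);
 *	setsockopt(sock_fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *
 * Clearing the option pushes any pending partial frame.
 */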

int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   unsigned int optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif

/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tcp_is_sack(tp))
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags&TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	if (sk->sk_state == TCP_LISTEN) {
		info->tcpi_unacked = sk->sk_ack_backlog;
		info->tcpi_sacked = sk->sk_max_ack_backlog;
	} else {
		info->tcpi_unacked = tp->packets_out;
		info->tcpi_sacked = tp->sacked_out;
	}
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}
EXPORT_SYMBOL_GPL(tcp_get_info);
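
/*
 * Editorial note (illustrative, not kernel code): userspace reads this
 * structure through the TCP_INFO getsockopt handled below, e.g.:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len);
 *
 * Note the scaling above: tcpi_rtt and tcpi_rcv_rtt are srtt >> 3 and
 * tcpi_rttvar is mdev >> 2, all converted to microseconds.
 */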

static int do_tcp_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle&TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle&TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = keepalive_time_when(tp) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = keepalive_intvl_when(tp) / HZ;
		break;
	case TCP_KEEPCNT:
		val = keepalive_probes(tp);
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
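
/*
 * Editorial note (illustrative, not kernel code): the TCP_CONGESTION
 * case above hands the congestion control algorithm name back to
 * userspace as a string (TCP_CA_NAME_MAX is 16 in this tree):
 *
 *	char name[TCP_CA_NAME_MAX];
 *	socklen_t len = sizeof(name);
 *	getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len);
 */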

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif

struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	do {
		th->fin = th->psh = 0;

		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				       (__force u32)delta));
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += mss;
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));

out:
	return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);
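
/*
 * Editorial note: the checksum handling above is incremental. With
 * oldlen = (u16)~skb->len captured before segmentation and
 * delta = htonl(oldlen + (thlen + mss)), each segment's check field is
 * patched for its new length via one's-complement addition rather than
 * being recomputed from scratch; the last segment gets its own delta
 * for its actual length, and only the non-CHECKSUM_PARTIAL path falls
 * back to csum_partial() over the transport header plus payload csum.
 */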

struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	unsigned int flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	flush = NAPI_GRO_CB(p)->flush;
	flush |= flags & TCP_FLAG_CWR;
	flush |= (flags ^ tcp_flag_word(th2)) &
		 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH);
	flush |= th->ack_seq ^ th2->ack_seq;
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = skb_shinfo(p)->gso_size;

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
			  TCP_FLAG_SYN | TCP_FLAG_FIN);

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
EXPORT_SYMBOL(tcp_gro_receive);

int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
		if (p) {
			if (p->md5_desc.tfm)
				crypto_free_hash(p->md5_desc.tfm);
			kfree(p);
			p = NULL;
		}
	}
	free_percpu(pool);
}

void tcp_free_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool = NULL;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	}
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	if (pool)
		__tcp_free_md5sig_pool(pool);
}
EXPORT_SYMBOL(tcp_free_md5sig_pool);

static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk)
{
	int cpu;
	struct tcp_md5sig_pool **pool;

	pool = alloc_percpu(struct tcp_md5sig_pool *);
	if (!pool)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p;
		struct crypto_hash *hash;

		p = kzalloc(sizeof(*p), sk->sk_allocation);
		if (!p)
			goto out_free;
		*per_cpu_ptr(pool, cpu) = p;

		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (!hash || IS_ERR(hash))
			goto out_free;

		p->md5_desc.tfm = hash;
	}
	return pool;
out_free:
	__tcp_free_md5sig_pool(pool);
	return NULL;
}

struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk)
{
	struct tcp_md5sig_pool **pool;
	int alloc = 0;

retry:
	spin_lock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		alloc = 1;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		tcp_md5sig_users--;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
		cpu_relax();
		goto retry;
	} else
		spin_unlock_bh(&tcp_md5sig_pool_lock);

	if (alloc) {
		/* we cannot hold spinlock here because this may sleep. */
		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk);
		spin_lock_bh(&tcp_md5sig_pool_lock);
		if (!p) {
			tcp_md5sig_users--;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			return NULL;
		}
		pool = tcp_md5sig_pool;
		if (pool) {
			/* oops, it has already been assigned. */
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			__tcp_free_md5sig_pool(p);
		} else {
			tcp_md5sig_pool = pool = p;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
		}
	}
	return pool;
}
EXPORT_SYMBOL(tcp_alloc_md5sig_pool);

struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
	struct tcp_md5sig_pool **p;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	p = tcp_md5sig_pool;
	if (p)
		tcp_md5sig_users++;
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	return (p ? *per_cpu_ptr(p, cpu) : NULL);
}
EXPORT_SYMBOL(__tcp_get_md5sig_pool);

void __tcp_put_md5sig_pool(void)
{
	tcp_free_md5sig_pool();
}
EXPORT_SYMBOL(__tcp_put_md5sig_pool);

int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
			struct tcphdr *th)
{
	struct scatterlist sg;
	int err;

	__sum16 old_checksum = th->check;
	th->check = 0;
	/* options aren't included in the hash */
	sg_init_one(&sg, th, sizeof(struct tcphdr));
	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
	th->check = old_checksum;
	return err;
}
EXPORT_SYMBOL(tcp_md5_hash_header);

int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
			  struct sk_buff *skb, unsigned header_len)
{
	struct scatterlist sg;
	const struct tcphdr *tp = tcp_hdr(skb);
	struct hash_desc *desc = &hp->md5_desc;
	unsigned i;
	const unsigned head_data_len = skb_headlen(skb) > header_len ?
				       skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);

	sg_init_table(&sg, 1);

	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
	if (crypto_hash_update(desc, &sg, head_data_len))
		return 1;

	for (i = 0; i < shi->nr_frags; ++i) {
		const struct skb_frag_struct *f = &shi->frags[i];
		sg_set_page(&sg, f->page, f->size, f->page_offset);
		if (crypto_hash_update(desc, &sg, f->size))
			return 1;
	}

	return 0;
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);

int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
{
	struct scatterlist sg;

	sg_init_one(&sg, key->key, key->keylen);
	return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
}
EXPORT_SYMBOL(tcp_md5_hash_key);
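
/*
 * Editorial note: callers combine the helpers above in the order
 * RFC 2385 prescribes for the MD5 signature digest: pseudo-header
 * (hashed by the address-family specific code), TCP header with the
 * check field zeroed and options excluded, payload, then the key.
 */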

#endif

void tcp_done(struct sock *sk)
{
	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);

extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
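
/*
 * Editorial note (illustrative): the established-hash size can be
 * pinned on the kernel command line, e.g.
 *
 *	thash_entries=131072
 *
 * When it is left at zero, alloc_large_system_hash() in tcp_init()
 * below scales the table to available memory instead.
 */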

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long nr_pages, limit;
	int order, i, max_share;

	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));

	percpu_counter_init(&tcp_sockets_allocated, 0);
	percpu_counter_init(&tcp_orphan_count, 0);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(totalram_pages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.ehash_size,
					NULL,
					thash_entries ? 0 : 512 * 1024);
	tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
	}
	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(totalram_pages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
			order++)
		;
	if (order >= 4) {
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}

	/* Set the pressure threshold to be a fraction of global memory that
	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
	 * memory, with a floor of 128 pages.
	 */
	nr_pages = totalram_pages - totalhigh_pages;
	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;
	sysctl_tcp_mem[1] = limit;
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;

	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_share);

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}

EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_splice_read);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);