tcp_output.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>
/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = 512;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
static inline void tcp_packets_out_inc(struct sock *sk,
                                       const struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int orig = tp->packets_out;

        tp->packets_out += tcp_skb_pcount(skb);
        if (!orig)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

static void update_send_head(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tcp_advance_send_head(sk, skb);
        tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
        tcp_packets_out_inc(sk, skb);

        /* Don't override Nagle indefinitely with F-RTO */
        if (tp->frto_counter == 2)
                tp->frto_counter = 3;
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!before(tp->snd_una + tp->snd_wnd, tp->snd_nxt))
                return tp->snd_nxt;
        else
                return tp->snd_una + tp->snd_wnd;
}
/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        int mss = tp->advmss;

        if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
                mss = dst_metric(dst, RTAX_ADVMSS);
                tp->advmss = mss;
        }

        return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of the cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_sock *tp = tcp_sk(sk);
        s32 delta = tcp_time_stamp - tp->lsndtime;
        u32 restart_cwnd = tcp_init_cwnd(tp, dst);
        u32 cwnd = tp->snd_cwnd;

        tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

        tp->snd_ssthresh = tcp_current_ssthresh(sk);
        restart_cwnd = min(restart_cwnd, cwnd);

        /* Halve cwnd for every RTO that elapsed while the connection
         * was idle, but never drop below the restart window.
         */
        while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        tp->snd_cwnd = max(cwnd, restart_cwnd);
        tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->snd_cwnd_used = 0;
}
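/* Worked example (editor's illustration, not in the original source):
 * suppose snd_cwnd = 16, the restart window computed by tcp_init_cwnd()
 * is 4, and the connection has been idle a little over 2 RTOs.  The
 * loop above halves cwnd twice (16 -> 8 -> 4), and max(cwnd,
 * restart_cwnd) leaves snd_cwnd at 4, so the sender re-enters slow
 * start from the restart window as RFC2861 suggests.
 */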
static void tcp_event_data_sent(struct tcp_sock *tp,
                                struct sk_buff *skb, struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        const u32 now = tcp_time_stamp;

        if (sysctl_tcp_slow_start_after_idle &&
            (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
                tcp_cwnd_restart(sk, __sk_dst_get(sk));

        tp->lsndtime = now;

        /* If it is a reply for ato after last received
         * packet, enter pingpong mode.
         */
        if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
                icsk->icsk_ack.pingpong = 1;
}

static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
        tcp_dec_quickack_mode(sk, pkts);
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
                               __u32 *rcv_wnd, __u32 *window_clamp,
                               int wscale_ok, __u8 *rcv_wscale)
{
        unsigned int space = (__space < 0 ? 0 : __space);

        /* If no clamp set the clamp to the max possible scaled window */
        if (*window_clamp == 0)
                (*window_clamp) = (65535 << 14);
        space = min(*window_clamp, space);

        /* Quantize space offering to a multiple of mss if possible. */
        if (space > mss)
                space = (space / mss) * mss;

        /* NOTE: offering an initial window larger than 32767
         * will break some buggy TCP stacks. If the admin tells us
         * it is likely we could be speaking with such a buggy stack
         * we will truncate our initial window offering to 32K-1
         * unless the remote has sent us a window scaling option,
         * which we interpret as a sign the remote TCP is not
         * misinterpreting the window field as a signed quantity.
         */
        if (sysctl_tcp_workaround_signed_windows)
                (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
        else
                (*rcv_wnd) = space;

        (*rcv_wscale) = 0;
        if (wscale_ok) {
                /* Set window scaling on max possible window
                 * See RFC1323 for an explanation of the limit to 14
                 */
                space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
                space = min_t(u32, space, *window_clamp);
                while (space > 65535 && (*rcv_wscale) < 14) {
                        space >>= 1;
                        (*rcv_wscale)++;
                }
        }

        /* Set the initial window to a value large enough for senders
         * following RFC2414. Senders not following this RFC
         * will be satisfied with 2.
         */
        if (mss > (1 << *rcv_wscale)) {
                int init_cwnd = 4;
                if (mss > 1460 * 3)
                        init_cwnd = 2;
                else if (mss > 1460)
                        init_cwnd = 3;
                if (*rcv_wnd > init_cwnd * mss)
                        *rcv_wnd = init_cwnd * mss;
        }

        /* Set the clamp no higher than max representable value */
        (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
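/* Worked example (editor's illustration, not in the original source):
 * with sysctl_tcp_rmem[2] = 262144 and no tighter window_clamp, the
 * loop above shifts 262144 -> 131072 -> 65536 -> 32768, so rcv_wscale
 * ends up as 3 (65536 is still > 65535, hence the third shift).  With
 * mss = 1460 the init_cwnd stays 4, so the initial rcv_wnd is capped
 * at 4 * 1460 = 5840 bytes, matching the RFC2414 initial-window table.
 */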
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 cur_win = tcp_receive_window(tp);
        u32 new_win = __tcp_select_window(sk);

        /* Never shrink the offered window */
        if (new_win < cur_win) {
                /* Danger Will Robinson!
                 * Don't update rcv_wup/rcv_wnd here or else
                 * we will not be able to advertise a zero
                 * window in time.  --DaveM
                 *
                 * Relax Will Robinson.
                 */
                new_win = cur_win;
        }
        tp->rcv_wnd = new_win;
        tp->rcv_wup = tp->rcv_nxt;

        /* Make sure we do not exceed the maximum possible
         * scaled window.
         */
        if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
                new_win = min(new_win, MAX_TCP_WINDOW);
        else
                new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

        /* RFC1323 scaling applied */
        new_win >>= tp->rx_opt.rcv_wscale;

        /* If we advertise zero window, disable fast path. */
        if (new_win == 0)
                tp->pred_flags = 0;

        return new_win;
}
static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
                                       struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
        if (!(tp->ecn_flags & TCP_ECN_OK))
                TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
}

static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tp->ecn_flags = 0;
        if (sysctl_tcp_ecn) {
                TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
                tp->ecn_flags = TCP_ECN_OK;
        }
}

static __inline__ void
TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
        if (inet_rsk(req)->ecn_ok)
                th->ece = 1;
}

static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
                                int tcp_header_len)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tp->ecn_flags & TCP_ECN_OK) {
                /* Not-retransmitted data segment: set ECT and inject CWR. */
                if (skb->len != tcp_header_len &&
                    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
                        INET_ECN_xmit(sk);
                        if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
                                tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
                                tcp_hdr(skb)->cwr = 1;
                                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
                        }
                } else {
                        /* ACK or retransmitted segment: clear ECT|CE */
                        INET_ECN_dontxmit(sk);
                }
                if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
                        tcp_hdr(skb)->ece = 1;
        }
}
static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
                                         __u32 tstamp, __u8 **md5_hash)
{
        if (tp->rx_opt.tstamp_ok) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_TIMESTAMP << 8) |
                               TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tstamp);
                *ptr++ = htonl(tp->rx_opt.ts_recent);
        }
        if (tp->rx_opt.eff_sacks) {
                struct tcp_sack_block *sp = tp->rx_opt.dsack ?
                        tp->duplicate_sack : tp->selective_acks;
                int this_sack;

                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_SACK << 8) |
                               (TCPOLEN_SACK_BASE + (tp->rx_opt.eff_sacks *
                                                     TCPOLEN_SACK_PERBLOCK)));
                for (this_sack = 0; this_sack < tp->rx_opt.eff_sacks; this_sack++) {
                        *ptr++ = htonl(sp[this_sack].start_seq);
                        *ptr++ = htonl(sp[this_sack].end_seq);
                }
                if (tp->rx_opt.dsack) {
                        tp->rx_opt.dsack = 0;
                        tp->rx_opt.eff_sacks--;
                }
        }
#ifdef CONFIG_TCP_MD5SIG
        if (md5_hash) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_MD5SIG << 8) |
                               TCPOLEN_MD5SIG);
                *md5_hash = (__u8 *)ptr;
        }
#endif
}
/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 *
 * Note - that with the RFC2385 TCP option, we make room for the
 * 16 byte MD5 hash. This will be filled in later, so the pointer for the
 * location to be filled is passed back up.
 */
static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
                                  int offer_wscale, int wscale, __u32 tstamp,
                                  __u32 ts_recent, __u8 **md5_hash)
{
        /* We always get an MSS option.
         * The option bytes which will be seen in normal data
         * packets should timestamps be used, must be in the MSS
         * advertised. But we subtract them from tp->mss_cache so
         * that calculations in tcp_sendmsg are simpler etc.
         * So account for this fact here if necessary. If we
         * don't do this correctly, as a receiver we won't
         * recognize data packets as being full sized when we
         * should, and thus we won't abide by the delayed ACK
         * rules correctly.
         * SACKs don't matter, we never delay an ACK when we
         * have any of those going out.
         */
        *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
        if (ts) {
                if (sack)
                        *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
                                       (TCPOLEN_SACK_PERM << 16) |
                                       (TCPOPT_TIMESTAMP << 8) |
                                       TCPOLEN_TIMESTAMP);
                else
                        *ptr++ = htonl((TCPOPT_NOP << 24) |
                                       (TCPOPT_NOP << 16) |
                                       (TCPOPT_TIMESTAMP << 8) |
                                       TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tstamp);         /* TSVAL */
                *ptr++ = htonl(ts_recent);      /* TSECR */
        } else if (sack)
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_SACK_PERM << 8) |
                               TCPOLEN_SACK_PERM);
        if (offer_wscale)
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_WINDOW << 16) |
                               (TCPOLEN_WINDOW << 8) |
                               (wscale));
#ifdef CONFIG_TCP_MD5SIG
        /*
         * If MD5 is enabled, then we set the option, and include the size
         * (always 18). The actual MD5 hash is added just before the
         * packet is sent.
         */
        if (md5_hash) {
                *ptr++ = htonl((TCPOPT_NOP << 24) |
                               (TCPOPT_NOP << 16) |
                               (TCPOPT_MD5SIG << 8) |
                               TCPOLEN_MD5SIG);
                *md5_hash = (__u8 *)ptr;
        }
#endif
}
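/* On-wire layout example (editor's illustration, not in the original
 * source): a SYN offering MSS 1460, SACK, timestamps and wscale 7 emits,
 * in order:
 *
 *   MSS        kind=2 len=4  value=1460               (4 bytes)
 *   SACK_PERM  kind=4 len=2, folded together with
 *   TIMESTAMP  kind=8 len=10 TSVAL, TSECR             (12 bytes total)
 *   NOP + WSCALE kind=3 len=3 shift=7                 (4 bytes)
 *
 * That is 20 option bytes, growing the TCP header from 20 to 40 bytes.
 */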
/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                            gfp_t gfp_mask)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet;
        struct tcp_sock *tp;
        struct tcp_skb_cb *tcb;
        int tcp_header_size;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *md5;
        __u8 *md5_hash_location;
#endif
        struct tcphdr *th;
        int sysctl_flags;
        int err;

        BUG_ON(!skb || !tcp_skb_pcount(skb));

        /* If congestion control is doing timestamping, we must
         * take such a timestamp before we potentially clone/copy.
         */
        if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
                __net_timestamp(skb);

        if (likely(clone_it)) {
                if (unlikely(skb_cloned(skb)))
                        skb = pskb_copy(skb, gfp_mask);
                else
                        skb = skb_clone(skb, gfp_mask);
                if (unlikely(!skb))
                        return -ENOBUFS;
        }

        inet = inet_sk(sk);
        tp = tcp_sk(sk);
        tcb = TCP_SKB_CB(skb);
        tcp_header_size = tp->tcp_header_len;

#define SYSCTL_FLAG_TSTAMPS     0x1
#define SYSCTL_FLAG_WSCALE      0x2
#define SYSCTL_FLAG_SACK        0x4

        sysctl_flags = 0;
        if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
                tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
                if (sysctl_tcp_timestamps) {
                        tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
                        sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
                }
                if (sysctl_tcp_window_scaling) {
                        tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
                        sysctl_flags |= SYSCTL_FLAG_WSCALE;
                }
                if (sysctl_tcp_sack) {
                        sysctl_flags |= SYSCTL_FLAG_SACK;
                        if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
                                tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
                }
        } else if (unlikely(tp->rx_opt.eff_sacks)) {
                /* A SACK is 2 pad bytes, a 2 byte header, plus
                 * 2 32-bit sequence numbers for each SACK block.
                 */
                tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
                                    (tp->rx_opt.eff_sacks *
                                     TCPOLEN_SACK_PERBLOCK));
        }

        if (tcp_packets_in_flight(tp) == 0)
                tcp_ca_event(sk, CA_EVENT_TX_START);

#ifdef CONFIG_TCP_MD5SIG
        /*
         * Are we doing MD5 on this segment? If so - make
         * room for it.
         */
        md5 = tp->af_specific->md5_lookup(sk, sk);
        if (md5)
                tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
#endif

        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
        skb_set_owner_w(skb, sk);

        /* Build TCP header and checksum it. */
        th = tcp_hdr(skb);
        th->source      = inet->sport;
        th->dest        = inet->dport;
        th->seq         = htonl(tcb->seq);
        th->ack_seq     = htonl(tp->rcv_nxt);
        /* Bytes 12-13 of the header hold the 4-bit data offset and
         * the flag bits; write them with a single 16-bit store.
         */
        *(((__be16 *)th) + 6)   = htons(((tcp_header_size >> 2) << 12) |
                                        tcb->flags);

        if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
                /* RFC1323: The window in SYN & SYN/ACK segments
                 * is never scaled.
                 */
                th->window      = htons(min(tp->rcv_wnd, 65535U));
        } else {
                th->window      = htons(tcp_select_window(sk));
        }
        th->check       = 0;
        th->urg_ptr     = 0;

        if (unlikely(tp->urg_mode &&
                     between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) {
                th->urg_ptr     = htons(tp->snd_up - tcb->seq);
                th->urg         = 1;
        }

        if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
                tcp_syn_build_options((__be32 *)(th + 1),
                                      tcp_advertise_mss(sk),
                                      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
                                      (sysctl_flags & SYSCTL_FLAG_SACK),
                                      (sysctl_flags & SYSCTL_FLAG_WSCALE),
                                      tp->rx_opt.rcv_wscale,
                                      tcb->when,
                                      tp->rx_opt.ts_recent,
#ifdef CONFIG_TCP_MD5SIG
                                      md5 ? &md5_hash_location :
#endif
                                      NULL);
        } else {
                tcp_build_and_update_options((__be32 *)(th + 1),
                                             tp, tcb->when,
#ifdef CONFIG_TCP_MD5SIG
                                             md5 ? &md5_hash_location :
#endif
                                             NULL);
                TCP_ECN_send(sk, skb, tcp_header_size);
        }

#ifdef CONFIG_TCP_MD5SIG
        /* Calculate the MD5 hash, as we have all we need now */
        if (md5) {
                tp->af_specific->calc_md5_hash(md5_hash_location,
                                               md5,
                                               sk, NULL, NULL,
                                               tcp_hdr(skb),
                                               sk->sk_protocol,
                                               skb->len);
        }
#endif

        icsk->icsk_af_ops->send_check(sk, skb->len, skb);

        if (likely(tcb->flags & TCPCB_FLAG_ACK))
                tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

        if (skb->len != tcp_header_size)
                tcp_event_data_sent(tp, skb, sk);

        if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
                TCP_INC_STATS(TCP_MIB_OUTSEGS);

        err = icsk->icsk_af_ops->queue_xmit(skb, 0);
        if (likely(err <= 0))
                return err;

        tcp_enter_cwr(sk, 1);

        return net_xmit_eval(err);

#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}
/* This routine just queues the buffer.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        /* Advance write_seq and place onto the write_queue. */
        tp->write_seq = TCP_SKB_CB(skb)->end_seq;
        skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
        sk_charge_skb(sk, skb);
}

static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
                                 unsigned int mss_now)
{
        if (skb->len <= mss_now || !sk_can_gso(sk)) {
                /* Avoid the costly divide in the normal
                 * non-TSO case.
                 */
                skb_shinfo(skb)->gso_segs = 1;
                skb_shinfo(skb)->gso_size = 0;
                skb_shinfo(skb)->gso_type = 0;
        } else {
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
                skb_shinfo(skb)->gso_size = mss_now;
                skb_shinfo(skb)->gso_type = sk->sk_gso_type;
        }
}
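/* Example (editor's illustration, not in the original source): a GSO
 * skb carrying 4000 bytes with mss_now = 1460 gets
 * gso_segs = DIV_ROUND_UP(4000, 1460) = 3; the hardware (or the GSO
 * software path) will emit two full 1460-byte segments plus one
 * 1080-byte tail segment from it.
 */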
/* When a modification to fackets_out becomes necessary, we need to check
 * whether skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
                                   int decr)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tp->sacked_out || tcp_is_reno(tp))
                return;

        if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
                tp->fackets_out -= decr;
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
                 unsigned int mss_now)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
        int nsize, old_factor;
        int nlen;
        u16 flags;

        BUG_ON(len > skb->len);

        tcp_clear_retrans_hints_partial(tp);
        nsize = skb_headlen(skb) - len;
        if (nsize < 0)
                nsize = 0;

        if (skb_cloned(skb) &&
            skb_is_nonlinear(skb) &&
            pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;

        /* Get a new skb... force flag on. */
        buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
        if (buff == NULL)
                return -ENOMEM; /* We'll just try again later. */

        sk_charge_skb(sk, buff);
        nlen = skb->len - len - nsize;
        buff->truesize += nlen;
        skb->truesize -= nlen;

        /* Correct the sequence numbers. */
        TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
        TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

        /* PSH and FIN should only be set in the second packet. */
        flags = TCP_SKB_CB(skb)->flags;
        TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
        TCP_SKB_CB(buff)->flags = flags;
        TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
        TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;

        if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
                /* Copy and checksum data tail into the new buffer. */
                buff->csum = csum_partial_copy_nocheck(skb->data + len,
                                                       skb_put(buff, nsize),
                                                       nsize, 0);
                skb_trim(skb, len);
                skb->csum = csum_block_sub(skb->csum, buff->csum, len);
        } else {
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb_split(skb, buff, len);
        }

        buff->ip_summed = skb->ip_summed;

        /* Looks stupid, but our code really uses when of
         * skbs, which it never sent before. --ANK
         */
        TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
        buff->tstamp = skb->tstamp;

        old_factor = tcp_skb_pcount(skb);

        /* Fix up tso_factor for both original and new SKB. */
        tcp_set_skb_tso_segs(sk, skb, mss_now);
        tcp_set_skb_tso_segs(sk, buff, mss_now);

        /* If this packet has been sent out already, we must
         * adjust the various packet counters.
         */
        if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
                int diff = old_factor - tcp_skb_pcount(skb) -
                        tcp_skb_pcount(buff);

                tp->packets_out -= diff;

                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
                        tp->sacked_out -= diff;
                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
                        tp->retrans_out -= diff;
                if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
                        tp->lost_out -= diff;

                /* Adjust Reno SACK estimate. */
                if (tcp_is_reno(tp) && diff > 0) {
                        tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
                        tcp_verify_left_out(tp);
                }
                tcp_adjust_fackets_out(sk, skb, diff);
        }

        /* Link BUFF into the send queue. */
        skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk);

        return 0;
}
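/* Bookkeeping example (editor's illustration, not in the original
 * source): splitting an already-sent 3-segment TSO skb (old_factor = 3)
 * at one mss leaves pcounts of 1 and 2, so diff = 3 - 1 - 2 = 0 and the
 * counters stay untouched.  If rounding in tcp_set_skb_tso_segs() ever
 * makes the two halves add up to fewer segments than before, diff > 0
 * and packets_out (plus any SACKED/RETRANS/LOST counts carried by the
 * skb) shrink by the same amount.
 */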
/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually).  The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
        int i, k, eat;

        eat = len;
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size <= eat) {
                        put_page(skb_shinfo(skb)->frags[i].page);
                        eat -= skb_shinfo(skb)->frags[i].size;
                } else {
                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
                        if (eat) {
                                skb_shinfo(skb)->frags[k].page_offset += eat;
                                skb_shinfo(skb)->frags[k].size -= eat;
                                eat = 0;
                        }
                        k++;
                }
        }
        skb_shinfo(skb)->nr_frags = k;

        skb_reset_tail_pointer(skb);
        skb->data_len -= len;
        skb->len = skb->data_len;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
        if (skb_cloned(skb) &&
            pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                return -ENOMEM;

        /* If len == headlen, we avoid __skb_pull to preserve alignment. */
        if (unlikely(len < skb_headlen(skb)))
                __skb_pull(skb, len);
        else
                __pskb_trim_head(skb, len - skb_headlen(skb));

        TCP_SKB_CB(skb)->seq += len;
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb->truesize        -= len;
        sk->sk_wmem_queued   -= len;
        sk->sk_forward_alloc += len;
        sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

        /* Any change of skb->len requires recalculation of tso
         * factor and mss.
         */
        if (tcp_skb_pcount(skb) > 1)
                tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

        return 0;
}

/* Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int mss_now;

        /* Calculate base mss without TCP options:
         * It is MMS_S - sizeof(tcphdr) of rfc1122
         */
        mss_now = pmtu - icsk->icsk_af_ops->net_header_len -
                  sizeof(struct tcphdr);

        /* Clamp it (mss_clamp does not include tcp options) */
        if (mss_now > tp->rx_opt.mss_clamp)
                mss_now = tp->rx_opt.mss_clamp;

        /* Now subtract optional transport overhead */
        mss_now -= icsk->icsk_ext_hdr_len;

        /* Then reserve room for full set of TCP options and 8 bytes of data */
        if (mss_now < 48)
                mss_now = 48;

        /* Now subtract TCP options size, not including SACKs */
        mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

        return mss_now;
}
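/* Worked example (editor's illustration, not in the original source):
 * for IPv4 with pmtu = 1500, net_header_len = 20 and no extension
 * headers, the base mss is 1500 - 20 - 20 = 1460.  With timestamps
 * negotiated, tcp_header_len is 20 + 12, so the final mss_now is
 * 1460 - 12 = 1448 payload bytes per segment.
 */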
/* Inverse of the above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int mtu;

        mtu = mss +
              tp->tcp_header_len +
              icsk->icsk_ext_hdr_len +
              icsk->icsk_af_ops->net_header_len;

        return mtu;
}

void tcp_mtup_init(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
        icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
                                      sizeof(struct tcphdr) +
                                      icsk->icsk_af_ops->net_header_len;
        icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
        icsk->icsk_mtup.probe_size = 0;
}

/* This function synchronizes snd mss to current pmtu/exthdr set.
 *
 * tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account
 * for TCP options, but includes only bare TCP header.
 *
 * tp->rx_opt.mss_clamp is mss negotiated at connection setup.
 * It is the minimum of user_mss and mss received with SYN.
 * It also does not include TCP options.
 *
 * inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
 *
 * tp->mss_cache is current effective sending mss, including
 * all tcp options except for SACKs. It is evaluated,
 * taking into account current pmtu, but never exceeds
 * tp->rx_opt.mss_clamp.
 *
 * NOTE1. rfc1122 clearly states that advertised MSS
 * DOES NOT include either tcp or ip options.
 *
 * NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
 * are READ ONLY outside this function. --ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int mss_now;

        if (icsk->icsk_mtup.search_high > pmtu)
                icsk->icsk_mtup.search_high = pmtu;

        mss_now = tcp_mtu_to_mss(sk, pmtu);

        /* Bound mss with half of window */
        if (tp->max_window && mss_now > (tp->max_window >> 1))
                mss_now = max((tp->max_window >> 1), 68U - tp->tcp_header_len);

        /* And store cached results */
        icsk->icsk_pmtu_cookie = pmtu;
        if (icsk->icsk_mtup.enabled)
                mss_now = min(mss_now,
                              tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
        tp->mss_cache = mss_now;

        return mss_now;
}
/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account rare use of URG, this
 * is not a big flaw.
 */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        u32 mss_now;
        u16 xmit_size_goal;
        int doing_tso = 0;

        mss_now = tp->mss_cache;

        if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
                doing_tso = 1;

        if (dst) {
                u32 mtu = dst_mtu(dst);
                if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
                        mss_now = tcp_sync_mss(sk, mtu);
        }

        if (tp->rx_opt.eff_sacks)
                mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
                            (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

#ifdef CONFIG_TCP_MD5SIG
        if (tp->af_specific->md5_lookup(sk, sk))
                mss_now -= TCPOLEN_MD5SIG_ALIGNED;
#endif

        xmit_size_goal = mss_now;

        if (doing_tso) {
                xmit_size_goal = (65535 -
                                  inet_csk(sk)->icsk_af_ops->net_header_len -
                                  inet_csk(sk)->icsk_ext_hdr_len -
                                  tp->tcp_header_len);

                if (tp->max_window &&
                    (xmit_size_goal > (tp->max_window >> 1)))
                        xmit_size_goal = max((tp->max_window >> 1),
                                             68U - tp->tcp_header_len);

                xmit_size_goal -= (xmit_size_goal % mss_now);
        }
        tp->xmit_size_goal = xmit_size_goal;

        return mss_now;
}
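/* Sizing example (editor's illustration, not in the original source):
 * for IPv4 with timestamps (tcp_header_len = 32, net_header_len = 20,
 * no extension headers) the TSO goal starts at 65535 - 20 - 32 = 65483
 * bytes; rounding down to a multiple of mss_now = 1448 gives
 * 45 * 1448 = 65160, so each TSO skb is built as 45 full segments.
 */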
/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 packets_out = tp->packets_out;

        if (packets_out >= tp->snd_cwnd) {
                /* Network is fed fully. */
                tp->snd_cwnd_used = 0;
                tp->snd_cwnd_stamp = tcp_time_stamp;
        } else {
                /* Network starves. */
                if (tp->packets_out > tp->snd_cwnd_used)
                        tp->snd_cwnd_used = tp->packets_out;

                if (sysctl_tcp_slow_start_after_idle &&
                    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
                        tcp_cwnd_application_limited(sk);
        }
}

static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb,
                                      unsigned int mss_now, unsigned int cwnd)
{
        u32 window, cwnd_len;

        window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
        cwnd_len = mss_now * cwnd;
        return min(window, cwnd_len);
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
                                         struct sk_buff *skb)
{
        u32 in_flight, cwnd;

        /* Don't be strict about the congestion window for the final FIN. */
        if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
            tcp_skb_pcount(skb) == 1)
                return 1;

        in_flight = tcp_packets_in_flight(tp);
        cwnd = tp->snd_cwnd;
        if (in_flight < cwnd)
                return (cwnd - in_flight);

        return 0;
}

/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
                             unsigned int mss_now)
{
        int tso_segs = tcp_skb_pcount(skb);

        if (!tso_segs ||
            (tso_segs > 1 &&
             tcp_skb_mss(skb) != mss_now)) {
                tcp_set_skb_tso_segs(sk, skb, mss_now);
                tso_segs = tcp_skb_pcount(skb);
        }
        return tso_segs;
}

static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
        return after(tp->snd_sml, tp->snd_una) &&
                !after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0, if packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
static inline int tcp_nagle_check(const struct tcp_sock *tp,
                                  const struct sk_buff *skb,
                                  unsigned mss_now, int nonagle)
{
        return (skb->len < mss_now &&
                ((nonagle & TCP_NAGLE_CORK) ||
                 (!nonagle &&
                  tp->packets_out &&
                  tcp_minshall_check(tp))));
}
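/* Example (editor's illustration, not in the original source): an
 * interactive sender with TCP_NODELAY unset writes 100 bytes while a
 * previous sub-mss segment (tracked by snd_sml) is still
 * unacknowledged; skb->len (100) < mss_now, packets_out != 0 and
 * tcp_minshall_check() is true, so tcp_nagle_check() returns nonzero
 * and the write is held back until the earlier small segment is ACKed.
 */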
/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
                                 unsigned int cur_mss, int nonagle)
{
        /* The Nagle rule does not apply to frames that sit in the middle of
         * the write_queue (they have no chance to get new data).
         *
         * This is implemented in the callers, where they modify the 'nonagle'
         * argument based upon the location of SKB in the send queue.
         */
        if (nonagle & TCP_NAGLE_PUSH)
                return 1;

        /* Don't use the nagle rule for urgent data (or for the final FIN).
         * Nagle can be ignored during F-RTO too (see RFC4138).
         */
        if (tp->urg_mode || (tp->frto_counter == 2) ||
            (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
                return 1;

        if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
                return 1;

        return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
                                   unsigned int cur_mss)
{
        u32 end_seq = TCP_SKB_CB(skb)->end_seq;

        if (skb->len > cur_mss)
                end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

        return !after(end_seq, tp->snd_una + tp->snd_wnd);
}

/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
                                 unsigned int cur_mss, int nonagle)
{
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int cwnd_quota;

        tcp_init_tso_segs(sk, skb, cur_mss);

        if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
                return 0;

        cwnd_quota = tcp_cwnd_test(tp, skb);
        if (cwnd_quota &&
            !tcp_snd_wnd_test(tp, skb, cur_mss))
                cwnd_quota = 0;

        return cwnd_quota;
}

int tcp_may_send_now(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb = tcp_send_head(sk);

        return (skb &&
                tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
                             (tcp_skb_is_last(sk, skb) ?
                              tp->nonagle : TCP_NAGLE_PUSH)));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
                        unsigned int mss_now)
{
        struct sk_buff *buff;
        int nlen = skb->len - len;
        u16 flags;

        /* All of a TSO frame must be composed of paged data. */
        if (skb->len != skb->data_len)
                return tcp_fragment(sk, skb, len, mss_now);

        buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC);
        if (unlikely(buff == NULL))
                return -ENOMEM;

        sk_charge_skb(sk, buff);
        buff->truesize += nlen;
        skb->truesize -= nlen;

        /* Correct the sequence numbers. */
        TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
        TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
        TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

        /* PSH and FIN should only be set in the second packet. */
        flags = TCP_SKB_CB(skb)->flags;
        TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
        TCP_SKB_CB(buff)->flags = flags;

        /* This packet was never sent out yet, so no SACK bits. */
        TCP_SKB_CB(buff)->sacked = 0;

        buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
        skb_split(skb, buff, len);

        /* Fix up tso_factor for both original and new SKB. */
        tcp_set_skb_tso_segs(sk, skb, mss_now);
        tcp_set_skb_tso_segs(sk, buff, mss_now);

        /* Link BUFF into the send queue. */
        skb_header_release(buff);
        tcp_insert_write_queue_after(skb, buff, sk);

        return 0;
}
/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 send_win, cong_win, limit, in_flight;

        if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
                goto send_now;

        if (icsk->icsk_ca_state != TCP_CA_Open)
                goto send_now;

        /* Defer for less than two clock ticks. */
        if (tp->tso_deferred &&
            ((jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
                goto send_now;

        in_flight = tcp_packets_in_flight(tp);

        BUG_ON(tcp_skb_pcount(skb) <= 1 ||
               (tp->snd_cwnd <= in_flight));

        send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;

        /* From in_flight test above, we know that cwnd > in_flight. */
        cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

        limit = min(send_win, cong_win);

        /* If a full-sized TSO skb can be sent, do it. */
        if (limit >= 65536)
                goto send_now;

        if (sysctl_tcp_tso_win_divisor) {
                u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

                /* If at least some fraction of a window is available,
                 * just use it.
                 */
                chunk /= sysctl_tcp_tso_win_divisor;
                if (limit >= chunk)
                        goto send_now;
        } else {
                /* Different approach, try not to defer past a single
                 * ACK.  Receiver should ACK every other full sized
                 * frame, so if we have space for more than 3 frames
                 * then send now.
                 */
                if (limit > tcp_max_burst(tp) * tp->mss_cache)
                        goto send_now;
        }

        /* Ok, it looks like it is advisable to defer.
         * Stash the jiffies timestamp shifted up by one; the low bit
         * keeps the value nonzero even when (jiffies << 1) is zero.
         */
        tp->tso_deferred = 1 | (jiffies << 1);

        return 1;

send_now:
        tp->tso_deferred = 0;
        return 0;
}
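/* Worked example (editor's illustration, not in the original source):
 * with the default tcp_tso_win_divisor of 3, snd_wnd = 65536,
 * snd_cwnd = 40 and mss_cache = 1448, the usable window is
 * min(65536, 40 * 1448) = 57920 bytes and chunk = 57920 / 3 = 19306.
 * If only 4 segments (5792 bytes) are currently sendable, limit < chunk
 * and the stack defers, hoping an incoming ACK lets it build a larger
 * TSO frame instead of splitting this one.
 */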
/* Create a new MTU probe if we are ready.
 * Returns 0 if we should wait to probe (no cwnd available),
 * 1 if a probe was sent,
 * -1 otherwise
 */
static int tcp_mtu_probe(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb, *nskb, *next;
        int len;
        int probe_size;
        int size_needed;
        int copy;
        int mss_now;

        /* Not currently probing/verifying,
         * not in recovery,
         * have enough cwnd, and
         * not SACKing (the variable headers throw things off)
         */
        if (!icsk->icsk_mtup.enabled ||
            icsk->icsk_mtup.probe_size ||
            inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
            tp->snd_cwnd < 11 ||
            tp->rx_opt.eff_sacks)
                return -1;

        /* Very simple search strategy: just double the MSS. */
        mss_now = tcp_current_mss(sk, 0);
        probe_size = 2 * tp->mss_cache;
        size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
        if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
                /* TODO: set timer for probe_converge_event */
                return -1;
        }

        /* Have enough data in the send queue to probe? */
        if (tp->write_seq - tp->snd_nxt < size_needed)
                return -1;

        if (tp->snd_wnd < size_needed)
                return -1;
        if (after(tp->snd_nxt + size_needed, tp->snd_una + tp->snd_wnd))
                return 0;

        /* Do we need to wait to drain cwnd? With none in flight, don't stall */
        if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
                if (!tcp_packets_in_flight(tp))
                        return -1;
                else
                        return 0;
        }

        /* We're allowed to probe.  Build it now. */
        if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
                return -1;
        sk_charge_skb(sk, nskb);

        skb = tcp_send_head(sk);

        TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
        TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
        TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
        TCP_SKB_CB(nskb)->sacked = 0;
        nskb->csum = 0;
        nskb->ip_summed = skb->ip_summed;

        tcp_insert_write_queue_before(nskb, skb, sk);

        len = 0;
        tcp_for_write_queue_from_safe(skb, next, sk) {
                copy = min_t(int, skb->len, probe_size - len);
                if (nskb->ip_summed)
                        skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
                else
                        nskb->csum = skb_copy_and_csum_bits(skb, 0,
                                                            skb_put(nskb, copy),
                                                            copy, nskb->csum);

                if (skb->len <= copy) {
                        /* We've eaten all the data from this skb.
                         * Throw it away.
                         */
                        TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
                        tcp_unlink_write_queue(skb, sk);
                        sk_stream_free_skb(sk, skb);
                } else {
                        TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
                                                   ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
                        if (!skb_shinfo(skb)->nr_frags) {
                                skb_pull(skb, copy);
                                if (skb->ip_summed != CHECKSUM_PARTIAL)
                                        skb->csum = csum_partial(skb->data,
                                                                 skb->len, 0);
                        } else {
                                __pskb_trim_head(skb, copy);
                                tcp_set_skb_tso_segs(sk, skb, mss_now);
                        }
                        TCP_SKB_CB(skb)->seq += copy;
                }

                len += copy;
                if (len >= probe_size)
                        break;
        }
        tcp_init_tso_segs(sk, nskb, nskb->len);

        /* We're ready to send.  If this fails, the probe will
         * be resegmented into mss-sized pieces by tcp_write_xmit().
         */
        TCP_SKB_CB(nskb)->when = tcp_time_stamp;
        if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
                /* Decrement cwnd here because we are sending
                 * effectively two packets.
                 */
                tp->snd_cwnd--;
                update_send_head(sk, nskb);

                icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
                tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
                tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;

                return 1;
        }

        return -1;
}
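/* Sizing example (editor's illustration, not in the original source):
 * with mss_cache = 1448 and the default reordering of 3, a probe
 * carries probe_size = 2 * 1448 = 2896 bytes, and the function insists
 * on size_needed = 2896 + 4 * 1448 = 8688 bytes of queued data and
 * window, so that ordinary-sized segments still follow the probe and a
 * lost probe can be detected by the fast retransmit machinery.
 */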
/* This routine writes packets to the network.  It advances the
 * send_head.  This happens as incoming acks open up the remote
 * window for us.
 *
 * Returns 1, if no segments are in flight and we have queued segments, but
 * cannot send anything now because of SWS or another problem.
 */
static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;
	int result;

	/* If we are closed, the bytes will have to remain here.
	 * In time closedown will finish, we empty the write queue and all
	 * will be happy.
	 */
	if (unlikely(sk->sk_state == TCP_CLOSE))
		return 0;

	sent_pkts = 0;

	/* Do MTU probing. */
	if ((result = tcp_mtu_probe(sk)) == 0) {
		return 0;
	} else if (result > 0) {
		sent_pkts = 1;
	}

	while ((skb = tcp_send_head(sk))) {
		unsigned int limit;

		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		cwnd_quota = tcp_cwnd_test(tp, skb);
		if (!cwnd_quota)
			break;

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			if (tcp_tso_should_defer(sk, skb))
				break;
		}

		limit = mss_now;
		if (tso_segs > 1) {
			limit = tcp_window_allows(tp, skb,
						  mss_now, cwnd_quota);

			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC)))
			break;

		/* Advance the send_head.  This one is sent out.
		 * This call will increment packets_out.
		 */
		update_send_head(sk, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts++;
	}

	if (likely(sent_pkts)) {
		tcp_cwnd_validate(sk);
		return 0;
	}
	return !tp->packets_out && tcp_send_head(sk);
}
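/* [Editor's note: worked example, not part of the original file.
 * The values are hypothetical.]
 *
 * The TSO trimming above keeps partial sends MSS-aligned. Suppose
 * mss_now = 1460, tcp_window_allows() returns limit = 5840 (four
 * segments), and skb->len = 4000:
 *
 *   trim  = 4000 % 1460 = 1080
 *   limit = 4000 - 1080 = 2920
 *
 * skb->len (4000) > limit (2920), so tso_fragment() splits off two
 * full-sized segments now and leaves the 1080-byte tail on the queue,
 * where later writes may grow it back to a full MSS.
 */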
/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle)
{
	struct sk_buff *skb = tcp_send_head(sk);

	if (skb) {
		if (tcp_write_xmit(sk, cur_mss, nonagle))
			tcp_check_probe_timer(sk);
	}
}
/* Send the _single_ skb sitting at the send head. Callers that need
 * the probe timer etc. set up must use the full push-pending-frames
 * path instead.
 */
void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	unsigned int tso_segs, cwnd_quota;

	BUG_ON(!skb || skb->len < mss_now);

	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);

	if (likely(cwnd_quota)) {
		unsigned int limit;

		BUG_ON(!tso_segs);

		limit = mss_now;
		if (tso_segs > 1) {
			limit = tcp_window_allows(tp, skb,
						  mss_now, cwnd_quota);

			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			return;

		/* Send it out now. */
		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
			update_send_head(sk, skb);
			tcp_cwnd_validate(sk);
			return;
		}
	}
}
/* This function returns the amount that we can raise the
 * usable window based on the following constraints
 *
 * 1. The window can never be shrunk once it is offered (RFC 793)
 * 2. We limit memory per socket
 *
 * RFC 1122:
 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
 *  RCV.NXT + RCV.WND fixed until:
 *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
 *
 * i.e. don't raise the right edge of the window until you can raise
 * it at least MSS bytes.
 *
 * Unfortunately, the recommended algorithm breaks header prediction,
 * since header prediction assumes th->window stays fixed.
 *
 * Strictly speaking, keeping th->window fixed violates the receiver
 * side SWS prevention criteria. The problem is that under this rule
 * a stream of single byte packets will cause the right side of the
 * window to always advance by a single byte.
 *
 * Of course, if the sender implements sender side SWS prevention
 * then this will not be a problem.
 *
 * BSD seems to make the following compromise:
 *
 *	If the free space is less than 1/4 of the maximum
 *	space available and the free space is less than 1/2 mss,
 *	then set the window to 0.
 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
 *	Otherwise, just prevent the window from shrinking
 *	and from being larger than the largest representable value.
 *
 * This prevents incremental opening of the window in the regime
 * where TCP is limited by the speed of the reader side taking
 * data out of the TCP receive queue. It does nothing about
 * those cases where the window is constrained on the sender side
 * because the pipeline is full.
 *
 * BSD also seems to "accidentally" limit itself to windows that are a
 * multiple of MSS, at least until the free space gets quite small.
 * This would appear to be a side effect of the mbuf implementation.
 * Combining these two algorithms results in the observed behavior
 * of having a fixed window size at almost all times.
 *
 * Below we obtain similar behavior by forcing the offered window to
 * a multiple of the mss when it is feasible to do so.
 *
 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
 * Regular options like TIMESTAMP are taken into account.
 */
u32 __tcp_select_window(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	/* MSS for the peer's data. Previous versions used mss_clamp
	 * here. I don't know if the value based on our guesses
	 * of the peer's MSS is better for performance. It's more correct
	 * but may be worse for performance because of rcv_mss
	 * fluctuations. --SAW 1998/11/1
	 */
	int mss = icsk->icsk_ack.rcv_mss;
	int free_space = tcp_space(sk);
	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
	int window;

	if (mss > full_space)
		mss = full_space;

	if (free_space < (full_space >> 1)) {
		icsk->icsk_ack.quick = 0;

		if (tcp_memory_pressure)
			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);

		if (free_space < mss)
			return 0;
	}

	if (free_space > tp->rcv_ssthresh)
		free_space = tp->rcv_ssthresh;

	/* Don't do rounding if we are using window scaling, since the
	 * scaled window will not line up with the MSS boundary anyway.
	 */
	window = tp->rcv_wnd;
	if (tp->rx_opt.rcv_wscale) {
		window = free_space;

		/* Advertise enough space so that it won't get scaled away.
		 * Important case: prevent a zero window announcement if
		 * 1<<rcv_wscale > mss.
		 */
		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
				  << tp->rx_opt.rcv_wscale);
	} else {
		/* Get the largest window that is a nice multiple of mss.
		 * Window clamp already applied above.
		 * If our current window offering is within 1 mss of the
		 * free space we just keep it. This prevents the divide
		 * and multiply from happening most of the time.
		 * We also don't do any window rounding when the free space
		 * is too small.
		 */
		if (window <= free_space - mss || window > free_space)
			window = (free_space/mss)*mss;
		else if (mss == full_space &&
			 free_space > window + (full_space >> 1))
			window = free_space;
	}

	return window;
}
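/* [Editor's note: worked example, not part of the original file.
 * Numbers are hypothetical.]
 *
 * Both branches above in action:
 *
 *   Scaled window, rcv_wscale = 7, free_space = 100:
 *     (100 >> 7) << 7 = 0 != 100, so window = ((100 >> 7) + 1) << 7 = 128.
 *     Rounding *up* is what keeps the "important case" above from
 *     degenerating into a zero-window announcement.
 *
 *   Unscaled window, mss = 1460, free_space = 10000, window = 5840:
 *     window <= free_space - mss (5840 <= 8540), so
 *     window = (10000 / 1460) * 1460 = 8760, an exact multiple of mss.
 */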
/* Attempt to collapse two adjacent SKB's during retransmission. */
static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);

	/* The first test we must make is that neither of these two
	 * SKB's are still referenced by someone else.
	 */
	if (!skb_cloned(skb) && !skb_cloned(next_skb)) {
		int skb_size = skb->len, next_skb_size = next_skb->len;
		u16 flags = TCP_SKB_CB(skb)->flags;

		/* Also punt if next skb has been SACK'd. */
		if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
			return;

		/* Next skb is out of window. */
		if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd))
			return;

		/* Punt if not enough space exists in the first SKB for
		 * the data in the second, or the total combined payload
		 * would exceed the MSS.
		 */
		if ((next_skb_size > skb_tailroom(skb)) ||
		    ((skb_size + next_skb_size) > mss_now))
			return;

		BUG_ON(tcp_skb_pcount(skb) != 1 ||
		       tcp_skb_pcount(next_skb) != 1);

		tcp_highest_sack_combine(sk, next_skb, skb);

		/* Ok.  We will be able to collapse the packet. */
		tcp_unlink_write_queue(next_skb, sk);

		skb_copy_from_linear_data(next_skb,
					  skb_put(skb, next_skb_size),
					  next_skb_size);

		if (next_skb->ip_summed == CHECKSUM_PARTIAL)
			skb->ip_summed = CHECKSUM_PARTIAL;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);

		/* Update sequence range on original skb. */
		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;

		/* Merge over control information. */
		flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
		TCP_SKB_CB(skb)->flags = flags;

		/* All done, get rid of second SKB and account for it so
		 * packet counting does not break.
		 */
		TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL);
		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS)
			tp->retrans_out -= tcp_skb_pcount(next_skb);
		if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST)
			tp->lost_out -= tcp_skb_pcount(next_skb);
		/* Reno case is special. Sigh... */
		if (tcp_is_reno(tp) && tp->sacked_out)
			tcp_dec_pcount_approx(&tp->sacked_out, next_skb);

		tcp_adjust_fackets_out(sk, next_skb, tcp_skb_pcount(next_skb));
		tp->packets_out -= tcp_skb_pcount(next_skb);

		/* changed transmit queue under us so clear hints */
		tcp_clear_retrans_hints_partial(tp);

		sk_stream_free_skb(sk, next_skb);
	}
}
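/* [Editor's note: worked example, not part of the original file.
 * Numbers are hypothetical.]
 *
 * With mss_now = 1460, a 500-byte skb followed by a 400-byte one (both
 * pcount 1, neither cloned, SACK'd, nor out of window, and assuming
 * 400 bytes of tailroom in the first) passes every test above:
 * 500 + 400 <= 1460. The result is a single 900-byte skb whose end_seq
 * is taken from the second one, so one retransmitted segment covers
 * both holes, and retrans_out/lost_out/packets_out are reduced by the
 * second skb's pcount to keep the accounting consistent.
 */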
/* Do a simple retransmit without using the backoff mechanisms in
 * tcp_timer. This is used for path mtu discovery.
 * The socket is already locked here.
 */
void tcp_simple_retransmit(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int mss = tcp_current_mss(sk, 0);
	int lost = 0;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		if (skb->len > mss &&
		    !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
			if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
				tp->retrans_out -= tcp_skb_pcount(skb);
			}
			if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) {
				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
				tp->lost_out += tcp_skb_pcount(skb);
				lost = 1;
			}
		}
	}

	tcp_clear_all_retrans_hints(tp);

	if (!lost)
		return;

	tcp_verify_left_out(tp);

	/* Don't muck with the congestion window here.
	 * Reason is that we do not increase amount of _data_
	 * in network, but units changed and effective
	 * cwnd/ssthresh really reduced now.
	 */
	if (icsk->icsk_ca_state != TCP_CA_Loss) {
		tp->high_seq = tp->snd_nxt;
		tp->snd_ssthresh = tcp_current_ssthresh(sk);
		tp->prior_ssthresh = 0;
		tp->undo_marker = 0;
		tcp_set_ca_state(sk, TCP_CA_Loss);
	}
	tcp_xmit_retransmit_queue(sk);
}
/* This retransmits one SKB. Policy decisions and retransmit queue
 * state updates are done by the caller. Returns non-zero if an
 * error occurred which prevented the send.
 */
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int cur_mss = tcp_current_mss(sk, 0);
	int err;

	/* Inconclusive MTU probe */
	if (icsk->icsk_mtup.probe_size) {
		icsk->icsk_mtup.probe_size = 0;
	}

	/* Do not send more than we have queued. 1/4 is reserved for possible
	 * copying overhead: fragmentation, tunneling, mangling etc.
	 */
	if (atomic_read(&sk->sk_wmem_alloc) >
	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
		return -EAGAIN;

	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			BUG();
		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
			return -ENOMEM;
	}

	/* If receiver has shrunk his window, and skb is out of
	 * new window, do not retransmit it. The exception is the
	 * case, when window is shrunk to zero. In this case
	 * our retransmit serves as a zero window probe.
	 */
	if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)
	    && TCP_SKB_CB(skb)->seq != tp->snd_una)
		return -EAGAIN;

	if (skb->len > cur_mss) {
		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
			return -ENOMEM; /* We'll try again later. */
	}

	/* Collapse two adjacent packets if worthwhile and we can. */
	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
	    (skb->len < (cur_mss >> 1)) &&
	    (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
	    (!tcp_skb_is_last(sk, skb)) &&
	    (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
	    (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(tcp_write_queue_next(sk, skb)) == 1) &&
	    (sysctl_tcp_retrans_collapse != 0))
		tcp_retrans_try_collapse(sk, skb, cur_mss);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* Some Solaris stacks overoptimize and ignore the FIN on a
	 * retransmit when old data is attached. So strip it off
	 * since it is cheap to do so and saves bytes on the network.
	 */
	if (skb->len > 0 &&
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
		if (!pskb_trim(skb, 0)) {
			TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
			skb_shinfo(skb)->gso_segs = 1;
			skb_shinfo(skb)->gso_size = 0;
			skb_shinfo(skb)->gso_type = 0;
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
		}
	}

	/* Make a copy, if the first transmission SKB clone we made
	 * is still in somebody's hands, else make a clone.
	 */
	TCP_SKB_CB(skb)->when = tcp_time_stamp;

	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);

	if (err == 0) {
		/* Update global TCP statistics. */
		TCP_INC_STATS(TCP_MIB_RETRANSSEGS);

		tp->total_retrans++;

#if FASTRETRANS_DEBUG > 0
		if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) {
			if (net_ratelimit())
				printk(KERN_DEBUG "retrans_out leaked.\n");
		}
#endif
		if (!tp->retrans_out)
			tp->lost_retrans_low = tp->snd_nxt;
		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
		tp->retrans_out += tcp_skb_pcount(skb);

		/* Save stamp of the first retransmit. */
		if (!tp->retrans_stamp)
			tp->retrans_stamp = TCP_SKB_CB(skb)->when;

		tp->undo_retrans++;

		/* snd_nxt is stored to detect loss of retransmitted segment,
		 * see tcp_input.c tcp_sacktag_write_queue().
		 */
		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
	}
	return err;
}
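/* [Editor's note: worked example, not part of the original file.
 * Numbers are hypothetical.]
 *
 * The send-buffer guard near the top of tcp_retransmit_skb(): with
 * sk_wmem_queued = 80000 and sk_sndbuf = 120000, the threshold is
 *
 *   min(80000 + 80000/4, 120000) = min(100000, 120000) = 100000
 *
 * so once sk_wmem_alloc exceeds 100000 bytes the retransmit is refused
 * with -EAGAIN. The extra quarter is headroom for skb growth due to
 * fragmentation, tunneling, mangling etc.
 */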
/* This gets called after a retransmit timeout, and the initially
 * retransmitted data is acknowledged. It tries to continue
 * resending the rest of the retransmit queue, until either
 * we've sent it all or the congestion window limit is reached.
 * If doing SACK, the first ACK which comes back for a timeout
 * based retransmit packet might feed us FACK information again.
 * If so, we use it to avoid unnecessary retransmissions.
 */
void tcp_xmit_retransmit_queue(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int packet_cnt;

	if (tp->retransmit_skb_hint) {
		skb = tp->retransmit_skb_hint;
		packet_cnt = tp->retransmit_cnt_hint;
	} else {
		skb = tcp_write_queue_head(sk);
		packet_cnt = 0;
	}

	/* First pass: retransmit lost packets. */
	if (tp->lost_out) {
		tcp_for_write_queue_from(skb, sk) {
			__u8 sacked = TCP_SKB_CB(skb)->sacked;

			if (skb == tcp_send_head(sk))
				break;
			/* we could do better than to assign each time */
			tp->retransmit_skb_hint = skb;
			tp->retransmit_cnt_hint = packet_cnt;

			/* Assume this retransmit will generate
			 * only one packet for congestion window
			 * calculation purposes.  This works because
			 * tcp_retransmit_skb() will chop up the
			 * packet to be MSS sized and all the
			 * packet counting works out.
			 */
			if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
				return;

			if (sacked & TCPCB_LOST) {
				if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
					if (tcp_retransmit_skb(sk, skb)) {
						tp->retransmit_skb_hint = NULL;
						return;
					}
					if (icsk->icsk_ca_state != TCP_CA_Loss)
						NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
					else
						NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS);

					if (skb == tcp_write_queue_head(sk))
						inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
									  inet_csk(sk)->icsk_rto,
									  TCP_RTO_MAX);
				}

				packet_cnt += tcp_skb_pcount(skb);
				if (packet_cnt >= tp->lost_out)
					break;
			}
		}
	}

	/* OK, demanded retransmission is finished. */

	/* Forward retransmissions are possible only during Recovery. */
	if (icsk->icsk_ca_state != TCP_CA_Recovery)
		return;

	/* No forward retransmissions in Reno are possible. */
	if (tcp_is_reno(tp))
		return;

	/* Yeah, we have to make a difficult choice between forward
	 * transmission and retransmission... Both ways have their
	 * merits...
	 *
	 * For now we do not retransmit anything, while we have some new
	 * segments to send. In the other cases, follow rule 3 for
	 * NextSeg() specified in RFC3517.
	 */

	if (tcp_may_send_now(sk))
		return;

	/* If nothing is SACKed, highest_sack in the loop won't be valid */
	if (!tp->sacked_out)
		return;

	if (tp->forward_skb_hint)
		skb = tp->forward_skb_hint;
	else
		skb = tcp_write_queue_head(sk);

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		tp->forward_skb_hint = skb;

		if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
			break;

		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
			break;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
			continue;

		/* Ok, retransmit it. */
		if (tcp_retransmit_skb(sk, skb)) {
			tp->forward_skb_hint = NULL;
			break;
		}

		if (skb == tcp_write_queue_head(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  inet_csk(sk)->icsk_rto,
						  TCP_RTO_MAX);

		NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
	}
}
/* Send a FIN. The caller locks the socket for us. This cannot be
 * allowed to fail queueing a FIN frame under any circumstances.
 */
void tcp_send_fin(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_write_queue_tail(sk);
	int mss_now;

	/* Optimization, tack on the FIN if we have a queue of
	 * unsent frames. But be careful about outgoing SACKs
	 * and IP options.
	 */
	mss_now = tcp_current_mss(sk, 1);

	if (tcp_send_head(sk) != NULL) {
		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
		TCP_SKB_CB(skb)->end_seq++;
		tp->write_seq++;
	} else {
		/* Socket is locked, keep trying until memory is available. */
		for (;;) {
			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
			if (skb)
				break;
			yield();
		}

		/* Reserve space for headers and prepare control bits. */
		skb_reserve(skb, MAX_TCP_HEADER);
		skb->csum = 0;
		TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
		TCP_SKB_CB(skb)->sacked = 0;
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;

		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
		TCP_SKB_CB(skb)->seq = tp->write_seq;
		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
		tcp_queue_skb(sk, skb);
	}
	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
}
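/* [Editor's note: worked example, not part of the original file.]
 *
 * The "FIN eats a sequence byte" rule in numbers: if write_seq is 1000
 * when the queue is empty, the FIN skb gets seq = 1000 and
 * end_seq = 1001, and tcp_queue_skb() advances write_seq to 1001.
 * In the piggyback case the same thing happens in place: the tail
 * skb's end_seq and write_seq are each bumped by one, with no payload
 * added.
 */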
/* We get here when a process closes a file descriptor (either due to
 * an explicit close() or as a byproduct of exit()'ing) and there
 * was unread data in the receive queue.  This behavior is recommended
 * by RFC 2525, section 2.17.  -DaveM
 */
void tcp_send_active_reset(struct sock *sk, gfp_t priority)
{
	struct sk_buff *skb;

	/* NOTE: No TCP options attached and we never retransmit this. */
	skb = alloc_skb(MAX_TCP_HEADER, priority);
	if (!skb) {
		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	skb->csum = 0;
	TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
	TCP_SKB_CB(skb)->sacked = 0;
	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	/* Send it off. */
	TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk);
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	if (tcp_transmit_skb(sk, skb, 0, priority))
		NET_INC_STATS(LINUX_MIB_TCPABORTFAILED);
}
/* WARNING: This routine must only be called when we have already sent
 * a SYN packet that crossed the incoming SYN that caused this routine
 * to get called. If this assumption fails then the initial rcv_wnd
 * and rcv_wscale values will not be correct.
 */
int tcp_send_synack(struct sock *sk)
{
	struct sk_buff *skb;

	skb = tcp_write_queue_head(sk);
	if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) {
		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
		return -EFAULT;
	}
	if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) {
		if (skb_cloned(skb)) {
			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
			if (nskb == NULL)
				return -ENOMEM;
			tcp_unlink_write_queue(skb, sk);
			skb_header_release(nskb);
			__tcp_add_write_queue_head(sk, nskb);
			sk_stream_free_skb(sk, skb);
			sk_charge_skb(sk, nskb);
			skb = nskb;
		}

		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
		TCP_ECN_send_synack(tcp_sk(sk), skb);
	}
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
/*
 * Prepare a SYN-ACK.
 */
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcphdr *th;
	int tcp_header_size;
	struct sk_buff *skb;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *md5;
	__u8 *md5_hash_location;
#endif

	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_TCP_HEADER);

	skb->dst = dst_clone(dst);

	tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
			   (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
			   (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
			   /* SACK_PERM is in the place of NOP NOP of TS */
			   ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));

#ifdef CONFIG_TCP_MD5SIG
	/* Are we doing MD5 on this segment? If so - make room for it */
	md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (md5)
		tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
#endif
	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	th = tcp_hdr(skb);
	memset(th, 0, sizeof(struct tcphdr));
	th->syn = 1;
	th->ack = 1;
	TCP_ECN_make_synack(req, th);
	th->source = inet_sk(sk)->sport;
	th->dest = ireq->rmt_port;
	TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
	TCP_SKB_CB(skb)->sacked = 0;
	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;
	th->seq = htonl(TCP_SKB_CB(skb)->seq);
	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
		__u8 rcv_wscale;
		/* Set this up on the first call only */
		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
		/* tcp_full_space because it is guaranteed to be the first packet */
		tcp_select_initial_window(tcp_full_space(sk),
					  dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
					  &req->rcv_wnd,
					  &req->window_clamp,
					  ireq->wscale_ok,
					  &rcv_wscale);
		ireq->rcv_wscale = rcv_wscale;
	}

	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
	th->window = htons(min(req->rcv_wnd, 65535U));

	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
			      ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
			      TCP_SKB_CB(skb)->when,
			      req->ts_recent,
			      (
#ifdef CONFIG_TCP_MD5SIG
			       md5 ? &md5_hash_location :
#endif
			       NULL)
			      );

	skb->csum = 0;
	th->doff = (tcp_header_size >> 2);
	TCP_INC_STATS(TCP_MIB_OUTSEGS);

#ifdef CONFIG_TCP_MD5SIG
	/* Okay, we have all we need - do the md5 hash if needed */
	if (md5) {
		tp->af_specific->calc_md5_hash(md5_hash_location,
					       md5,
					       NULL, dst, req,
					       tcp_hdr(skb), sk->sk_protocol,
					       skb->len);
	}
#endif
	return skb;
}
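/* [Editor's note: worked example, not part of the original file.]
 *
 * The tcp_header_size arithmetic above, for a SYN-ACK granting
 * timestamps, window scaling and SACK (and no MD5):
 *
 *   sizeof(struct tcphdr)    20
 *   TCPOLEN_MSS             + 4
 *   TCPOLEN_TSTAMP_ALIGNED  +12   (SACK_PERM rides in its NOP slots)
 *   TCPOLEN_WSCALE_ALIGNED  + 4
 *                           ----
 *                            40 bytes, so th->doff = 40 >> 2 = 10.
 *
 * Only when timestamps are off does SACK_PERM cost its own
 * TCPOLEN_SACKPERM_ALIGNED 4 bytes.
 */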
/*
 * Do all connect socket setups that can be done AF independent.
 */
static void tcp_connect_init(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u8 rcv_wscale;

	/* We'll fix this up when we get a response from the other end.
	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
	 */
	tp->tcp_header_len = sizeof(struct tcphdr) +
		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);

#ifdef CONFIG_TCP_MD5SIG
	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	/* If user gave his TCP_MAXSEG, record it to clamp */
	if (tp->rx_opt.user_mss)
		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
	tp->max_window = 0;
	tcp_mtup_init(sk);
	tcp_sync_mss(sk, dst_mtu(dst));

	if (!tp->window_clamp)
		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(sk);

	tcp_select_initial_window(tcp_full_space(sk),
				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
				  &tp->rcv_wnd,
				  &tp->window_clamp,
				  sysctl_tcp_window_scaling,
				  &rcv_wscale);

	tp->rx_opt.rcv_wscale = rcv_wscale;
	tp->rcv_ssthresh = tp->rcv_wnd;

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->snd_wnd = 0;
	tcp_init_wl(tp, tp->write_seq, 0);
	tp->snd_una = tp->write_seq;
	tp->snd_sml = tp->write_seq;
	tp->rcv_nxt = 0;
	tp->rcv_wup = 0;
	tp->copied_seq = 0;

	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
	inet_csk(sk)->icsk_retransmits = 0;
	tcp_clear_retrans(tp);
}
/*
 * Build a SYN and send it off.
 */
int tcp_connect(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;

	tcp_connect_init(sk);

	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
	if (unlikely(buff == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(buff, MAX_TCP_HEADER);

	TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
	TCP_ECN_send_syn(sk, buff);
	TCP_SKB_CB(buff)->sacked = 0;
	skb_shinfo(buff)->gso_segs = 1;
	skb_shinfo(buff)->gso_size = 0;
	skb_shinfo(buff)->gso_type = 0;
	buff->csum = 0;
	tp->snd_nxt = tp->write_seq;
	TCP_SKB_CB(buff)->seq = tp->write_seq++;
	TCP_SKB_CB(buff)->end_seq = tp->write_seq;

	/* Send it off. */
	TCP_SKB_CB(buff)->when = tcp_time_stamp;
	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
	skb_header_release(buff);
	__tcp_add_write_queue_tail(sk, buff);
	sk_charge_skb(sk, buff);
	tp->packets_out += tcp_skb_pcount(buff);
	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);

	/* We change tp->snd_nxt after the tcp_transmit_skb() call
	 * in order to make this packet get counted in tcpOutSegs.
	 */
	tp->snd_nxt = tp->write_seq;
	tp->pushed_seq = tp->write_seq;
	TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the SYN until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
	return 0;
}
/* Send out a delayed ack, the caller does the policy checking
 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
 * for details.
 */
void tcp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int ato = icsk->icsk_ack.ato;
	unsigned long timeout;

	if (ato > TCP_DELACK_MIN) {
		const struct tcp_sock *tp = tcp_sk(sk);
		int max_ato = HZ/2;

		if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
			max_ato = TCP_DELACK_MAX;

		/* Slow path, intersegment interval is "high". */

		/* If some rtt estimate is known, use it to bound delayed ack.
		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
		 * directly.
		 */
		if (tp->srtt) {
			int rtt = max(tp->srtt>>3, TCP_DELACK_MIN);

			if (rtt < max_ato)
				max_ato = rtt;
		}

		ato = min(ato, max_ato);
	}

	/* Stay within the limit we were given */
	timeout = jiffies + ato;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 */
		if (icsk->icsk_ack.blocked ||
		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
			tcp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
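/* [Editor's note: worked example, not part of the original file.
 * Numbers are hypothetical and assume HZ = 1000.]
 *
 * How the RTT bound caps the delayed-ack timer: say ato = 200 jiffies
 * and srtt = 400 (srtt is stored left-shifted by 3, so the smoothed
 * RTT is 400 >> 3 = 50 jiffies).
 *
 *   max_ato starts at HZ/2 = 500
 *   rtt = max(50, TCP_DELACK_MIN) = 50 < 500, so max_ato = 50
 *   ato = min(200, 50) = 50
 *
 * i.e. on a fast path the ACK is delayed roughly one smoothed RTT,
 * not the full delayed-ack maximum.
 */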
/* This routine sends an ack and also updates the window. */
void tcp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != TCP_CLOSE) {
		struct sk_buff *buff;

		/* We are not putting this on the write queue, so
		 * tcp_transmit_skb() will set the ownership to this
		 * sock.
		 */
		buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
		if (buff == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX, TCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers and prepare control bits. */
		skb_reserve(buff, MAX_TCP_HEADER);
		buff->csum = 0;
		TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
		TCP_SKB_CB(buff)->sacked = 0;
		skb_shinfo(buff)->gso_segs = 1;
		skb_shinfo(buff)->gso_size = 0;
		skb_shinfo(buff)->gso_type = 0;

		/* Send it off, this clears delayed acks for us. */
		TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
		TCP_SKB_CB(buff)->when = tcp_time_stamp;
		tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
	}
}
/* This routine sends a packet with an out of date sequence
 * number. It assumes the other end will try to ack it.
 *
 * Question: what should we do in urgent mode?
 * 4.4BSD forces sending single byte of data. We cannot send
 * out of window data, because we have SND.NXT==SND.MAX...
 *
 * Current solution: to send TWO zero-length segments in urgent mode:
 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
 * out-of-date with SND.UNA-1 to probe window.
 */
static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* We don't queue it, tcp_transmit_skb() sets ownership. */
	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
	if (skb == NULL)
		return -1;

	/* Reserve space for headers and set control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	skb->csum = 0;
	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked = urgent;
	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	/* Use a previous sequence.  This should cause the other
	 * end to send an ack.  Don't queue or clone SKB, just
	 * send it.
	 */
	TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(skb)->when = tcp_time_stamp;
	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
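/* [Editor's note: worked example, not part of the original file.]
 *
 * With snd_una = 1000, the non-urgent probe above carries
 * seq = end_seq = 999: a zero-length segment one byte below the
 * cumulative ACK point. The peer treats it as old data and replies
 * with a pure ACK announcing its current window, which is exactly
 * what a zero-window probe needs. In urgent mode tcp_write_wakeup()
 * first sends a second segment with seq = 1000 so the urgent pointer
 * is delivered.
 */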
int tcp_write_wakeup(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct sk_buff *skb;

		if ((skb = tcp_send_head(sk)) != NULL &&
		    before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) {
			int err;
			unsigned int mss = tcp_current_mss(sk, 0);
			unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq;

			if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
				tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;

			/* We are probing the opening of a window
			 * but the window size is != 0; this must have
			 * been the result of sender-side SWS avoidance.
			 */
			if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
			    skb->len > mss) {
				seg_size = min(seg_size, mss);
				TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
				if (tcp_fragment(sk, skb, seg_size, mss))
					return -1;
			} else if (!tcp_skb_pcount(skb))
				tcp_set_skb_tso_segs(sk, skb, mss);

			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
			TCP_SKB_CB(skb)->when = tcp_time_stamp;
			err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
			if (!err) {
				update_send_head(sk, skb);
			}
			return err;
		} else {
			if (tp->urg_mode &&
			    between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF))
				tcp_xmit_probe_skb(sk, TCPCB_URG);
			return tcp_xmit_probe_skb(sk, 0);
		}
	}
	return -1;
}
/* A window probe timeout has occurred.  If window is not closed send
 * a partial packet else a zero probe.
 */
void tcp_send_probe0(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err;

	err = tcp_write_wakeup(sk);

	if (tp->packets_out || !tcp_send_head(sk)) {
		/* Cancel probe timer, if it is not required. */
		icsk->icsk_probes_out = 0;
		icsk->icsk_backoff = 0;
		return;
	}

	if (err <= 0) {
		if (icsk->icsk_backoff < sysctl_tcp_retries2)
			icsk->icsk_backoff++;
		icsk->icsk_probes_out++;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
					  TCP_RTO_MAX);
	} else {
		/* If packet was not sent due to local congestion,
		 * do not backoff and do not remember icsk_probes_out.
		 * Let local senders fight for local resources.
		 *
		 * Still use the accumulated backoff, though.
		 */
		if (!icsk->icsk_probes_out)
			icsk->icsk_probes_out = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  min(icsk->icsk_rto << icsk->icsk_backoff,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
	}
}
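/* [Editor's note: worked example, not part of the original file.
 * Numbers are hypothetical and assume HZ = 1000.]
 *
 * Probe timer backoff: with icsk_rto = 200 jiffies (~200 ms) and
 * icsk_backoff = 4, the next probe fires after
 *
 *   min(200 << 4, TCP_RTO_MAX) = min(3200, 120000) = 3200 jiffies,
 *
 * i.e. ~3.2 s, doubling on each failed probe until capped at
 * TCP_RTO_MAX. On local-congestion failures the same accumulated
 * backoff is used but capped at TCP_RESOURCE_PROBE_INTERVAL instead.
 */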
EXPORT_SYMBOL(tcp_connect);
EXPORT_SYMBOL(tcp_make_synack);
EXPORT_SYMBOL(tcp_simple_retransmit);
EXPORT_SYMBOL(tcp_sync_mss);
EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
EXPORT_SYMBOL(tcp_mtup_init);