tcp_timer.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;
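
/* A note on the defaults above (taken from include/net/tcp.h in this
 * kernel generation, so treat the exact values as assumptions): TCP_RETR1
 * is 3, TCP_RETR2 is 15, and the keepalive triple is 7200 s idle time,
 * 75 s probe interval and 9 probes. tcp_orphan_retries deliberately
 * defaults to 0 so that tcp_orphan_retries() below can pick a safe value.
 */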
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}
/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. the number of orphaned sockets exceeds an administratively
 *    configured limit, or
 * 2. we are under strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If the peer does not open its window for a long time, or did not
	 * transmit anything for a long time, penalize it. */
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;
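
	/* Each unit of shift scales the orphan count by two in
	 * tcp_check_oom() (see tcp_too_many_orphans()), so a penalized
	 * socket hits the sysctl_tcp_max_orphans limit correspondingly
	 * earlier.
	 */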
	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = 1;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}
/* Calculate the maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, int alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if the socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with a minimal
	 * RTO of 200 msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	/* Black hole detection */
	if (sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;
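
			/* Back off: halve the MSS implied by the current
			 * search floor, pull it down to at most
			 * sysctl_tcp_base_mss, and keep at least a
			 * 68-byte-MTU worth of payload (the IPv4 minimum).
			 */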
			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}
/* This function calculates a "timeout" which is equivalent to the timeout
 * of a TCP connection after "boundary" unsuccessful, exponentially
 * backed-off retransmissions with an initial RTO of TCP_RTO_MIN, or of
 * TCP_TIMEOUT_INIT if the syn_set flag is set.
 */
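/* Worked example (assuming TCP_RTO_MIN = 200 ms and TCP_RTO_MAX = 120 s):
 * for boundary = 3 and timeout = 0, linear_backoff_thresh = ilog2(600) = 9,
 * so boundary <= linear_backoff_thresh and
 * timeout = ((2 << 3) - 1) * 200 ms = 3 s, which is exactly the sum of the
 * doubling RTOs 200 + 400 + 800 + 1600 ms.
 */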
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	if (unlikely(!tcp_sk(sk)->retrans_stamp))
		start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
	else
		start_ts = tcp_sk(sk)->retrans_stamp;

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = sysctl_tcp_retries2;
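		/* With the default tcp_retries2 of 15 and a 200 ms minimum
		 * RTO, retransmits_timed_out() above works out to roughly
		 * 924.6 s (about 15 minutes) before the connection is
		 * abandoned.
		 */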
		if (sock_flag(sk, SOCK_DEAD)) {
			const int alive = (icsk->icsk_rto < TCP_RTO_MAX);

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}
void tcp_delack_timer_handler(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;
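
	/* The user process failed to drain the prequeue in time; process
	 * the queued segments ourselves in softirq context.
	 */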
	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (sk_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}
static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		inet_csk(sk)->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* *WARNING* RFC 1122 forbids this
	 *
	 * It doesn't AFAIK, because we kill the retransmit timer -AK
	 *
	 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
	 * this behaviour in Solaris down as a bug fix. [AC]
	 *
	 * Let me explain. icsk_probes_out is zeroed by incoming ACKs
	 * even if they advertise a zero window. Hence, the connection is
	 * killed only if we received no ACKs for the normal connection
	 * timeout. It is not killed merely because the window stays zero
	 * for some time; the window may be zero until armageddon and even
	 * later. We are in full accordance with the RFCs, except that the
	 * probe timer combines both retransmission timeout and probe
	 * timeout in one bottle. --ANK
	 */
	max_probes = sysctl_tcp_retries2;
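
	/* For orphans, treat the peer as "alive" while the backed-off RTO
	 * has not yet saturated at TCP_RTO_MAX; orphans with a dead peer
	 * get the stingier tcp_orphan_retries() budget.
	 */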
	if (sock_flag(sk, SOCK_DEAD)) {
		const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);

		max_probes = tcp_orphan_retries(sk, alive);

		if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}
/*
 *	The TCP retransmit timer.
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (tp->early_retrans_delayed) {
		tcp_resume_early_retransmit(sk);
		return;
	}

	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* The receiver has dastardly shrunk the window. Our
		 * retransmits become zero window probes, but we should not
		 * time out this connection. If the socket is an orphan,
		 * time it out; we cannot allow such beasts to hang
		 * infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
				       &inet->inet_daddr,
				       ntohs(inet->inet_dport), inet->inet_num,
				       tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
				       &np->daddr,
				       ntohs(inet->inet_dport), inet->inet_num,
				       tp->snd_una, tp->snd_nxt);
		}
#endif
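		/* The peer is still ACKing (rcv_tstamp is fresh), so treat
		 * this as loss and keep probing rather than killing the
		 * connection; only give up after TCP_RTO_MAX of silence.
		 */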
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk, 0);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	if (tcp_use_frto(sk)) {
		tcp_enter_frto(sk);
	} else {
		tcp_enter_loss(sk, 0);
	}
	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
		/* Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}
	/* Increase the timeout each time we retransmit. Note that
	 * we do not increase the rtt estimate. rto is initialized
	 * from rtt, but increases here. Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic. NetBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards. Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT. I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;
out_reset_timer:
	/* If the stream is thin, use linear timeouts. Since 'icsk_backoff'
	 * is used to reset the timer, set it to 0. Recalculate 'icsk_rto'
	 * as it might have been increased if the stream oscillates between
	 * thin and thick; the old value might then already be too high
	 * compared to the value set by 'tcp_set_rto' in tcp_input.c, which
	 * resets the rto without backoff. Limit to TCP_THIN_LINEAR_RETRIES
	 * before initiating exponential backoff behaviour, to avoid
	 * continuing to hammer linear-timeout retransmissions into a
	 * black hole.
	 */
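	/* A stream counts as thin while fewer than four segments are in
	 * flight (see tcp_stream_is_thin()): with so few packets, at most
	 * three duplicate ACKs can arrive, so fast retransmit may never
	 * trigger and timeouts dominate the recovery latency.
	 */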
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}
static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}
/*
 *	Timer for listening sockets
 */
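/* Listening sockets have no retransmit/probe state of their own, so the
 * keepalive timer slot is reused for them (see tcp_keepalive_timer()
 * below): every TCP_SYNQ_INTERVAL the SYN queue is walked and pending
 * SYN-ACKs are retransmitted or, past the synack retry budget, dropped.
 */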
static void tcp_synack_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
				   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
}

void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
{
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);
void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		tcp_synack_timer(sk);
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;
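
	/* With the defaults (7200 s idle time, 75 s probe interval, 9
	 * probes, all tunable via sysctl), an unresponsive idle peer is
	 * detected roughly 7200 + 9 * 75 = 7875 s after the last activity.
	 */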
	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		     elapsed >= icsk->icsk_user_timeout &&
		     icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		     icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}
EXPORT_SYMBOL(tcp_init_xmit_timers);