@@ -32,17 +32,6 @@ int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
 int sysctl_tcp_orphan_retries __read_mostly;
 int sysctl_tcp_thin_linear_timeouts __read_mostly;
 
-static void tcp_write_timer(unsigned long);
-static void tcp_delack_timer(unsigned long);
-static void tcp_keepalive_timer (unsigned long data);
-
-void tcp_init_xmit_timers(struct sock *sk)
-{
-	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
-				  &tcp_keepalive_timer);
-}
-EXPORT_SYMBOL(tcp_init_xmit_timers);
-
 static void tcp_write_err(struct sock *sk)
 {
 	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
@@ -205,21 +194,11 @@ static int tcp_write_timeout(struct sock *sk)
 	return 0;
 }
 
-static void tcp_delack_timer(unsigned long data)
+void tcp_delack_timer_handler(struct sock *sk)
 {
-	struct sock *sk = (struct sock *)data;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
-	bh_lock_sock(sk);
-	if (sock_owned_by_user(sk)) {
-		/* Try again later. */
-		icsk->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
-		sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
-		goto out_unlock;
-	}
-
 	sk_mem_reclaim_partial(sk);
 
 	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
@@ -260,7 +239,21 @@ static void tcp_delack_timer(unsigned long data)
 out:
 	if (sk_under_memory_pressure(sk))
 		sk_mem_reclaim(sk);
-out_unlock:
+}
+
+static void tcp_delack_timer(unsigned long data)
+{
+	struct sock *sk = (struct sock *)data;
+
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk)) {
+		tcp_delack_timer_handler(sk);
+	} else {
+		inet_csk(sk)->icsk_ack.blocked = 1;
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+		/* delegate our work to tcp_release_cb() */
+		set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+	}
 	bh_unlock_sock(sk);
 	sock_put(sk);
 }
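
The "delegate our work to tcp_release_cb()" comment refers to code that lives in
net/ipv4/tcp_output.c, not in this file. As a rough sketch of how the deferred
bits are consumed once the owning process releases the socket lock (the flag
names are the ones set above; the body below is a reconstruction for
illustration, not part of this diff):

/* Sketch: called from release_sock() via sk->sk_prot->release_cb.
 * Runs the handlers whose timers fired while the socket was owned
 * by user context. Other deferred events are handled the same way.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* Atomically fetch and clear all deferred-event bits. */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED))
		tcp_write_timer_handler(sk);

	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED))
		tcp_delack_timer_handler(sk);
}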
@@ -450,19 +443,11 @@ out_reset_timer:
 out:;
 }
 
-static void tcp_write_timer(unsigned long data)
+void tcp_write_timer_handler(struct sock *sk)
 {
-	struct sock *sk = (struct sock *)data;
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int event;
 
-	bh_lock_sock(sk);
-	if (sock_owned_by_user(sk)) {
-		/* Try again later */
-		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
-		goto out_unlock;
-	}
-
 	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
 		goto out;
 
@@ -485,7 +470,19 @@ static void tcp_write_timer(unsigned long data)
 
 out:
 	sk_mem_reclaim(sk);
-out_unlock:
+}
+
+static void tcp_write_timer(unsigned long data)
+{
+	struct sock *sk = (struct sock *)data;
+
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk)) {
+		tcp_write_timer_handler(sk);
+	} else {
+		/* delegate our work to tcp_release_cb() */
+		set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+	}
 	bh_unlock_sock(sk);
 	sock_put(sk);
 }
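
Note the latency improvement in the contended case: the old code rearmed
icsk_retransmit_timer for jiffies + (HZ / 20), i.e. a fixed 50 ms retry, while
the deferred bit lets tcp_write_timer_handler() run as soon as the lock owner
calls release_sock(). The delack path above similarly drops its fixed
TCP_DELACK_MIN rearm.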
@@ -602,3 +599,10 @@ out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
 }
+
+void tcp_init_xmit_timers(struct sock *sk)
+{
+	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
+				  &tcp_keepalive_timer);
+}
+EXPORT_SYMBOL(tcp_init_xmit_timers);
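
tcp_init_xmit_timers() is unchanged; it only moves below the timer callbacks so
the static forward declarations at the top of the file can go. For reference,
inet_csk_init_xmit_timers() just wires the three callbacks into the socket's
timers; a sketch of the 3.x-era helper (net/ipv4/inet_connection_sock.c), which
also shows why the callbacks take an unsigned long:

void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* The sock pointer is each timer's 'data' cookie. */
	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
		    (unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
		    (unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}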