@@ -1153,7 +1153,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
+	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1793,13 +1793,13 @@ void tcp_close(struct sock *sk, long timeout)
 	 */
 	if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
-		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
+		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, GFP_KERNEL);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
-		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
+		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 	} else if (tcp_close_state(sk)) {
 		/* We FIN if the application ate all the data before
 		 * zapping the connection.