@@ -294,9 +294,9 @@ static u16 tcp_select_window(struct sock *sk)
 /* Packet ECN state for a SYN-ACK */
 static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
+	TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
 	if (!(tp->ecn_flags & TCP_ECN_OK))
-		TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
+		TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
 }
 
 /* Packet ECN state for a SYN. */
@@ -306,7 +306,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
 
 	tp->ecn_flags = 0;
 	if (sysctl_tcp_ecn == 1) {
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
 		tp->ecn_flags = TCP_ECN_OK;
 	}
 }
@@ -361,7 +361,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 	skb_shinfo(skb)->gso_type = 0;
 
 	TCP_SKB_CB(skb)->seq = seq;
-	if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
+	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
 		seq++;
 	TCP_SKB_CB(skb)->end_seq = seq;
 }
@@ -820,7 +820,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	tcb = TCP_SKB_CB(skb);
 	memset(&opts, 0, sizeof(opts));
 
-	if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
+	if (unlikely(tcb->flags & TCPHDR_SYN))
 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
 	else
 		tcp_options_size = tcp_established_options(sk, skb, &opts,
@@ -843,7 +843,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
 					tcb->flags);
 
-	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
+	if (unlikely(tcb->flags & TCPHDR_SYN)) {
 		/* RFC1323: The window in SYN & SYN/ACK segments
 		 * is never scaled.
 		 */
@@ -866,7 +866,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	}
 
 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
-	if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
+	if (likely((tcb->flags & TCPHDR_SYN) == 0))
 		TCP_ECN_send(sk, skb, tcp_header_size);
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -880,7 +880,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
 	icsk->icsk_af_ops->send_check(sk, skb);
 
-	if (likely(tcb->flags & TCPCB_FLAG_ACK))
+	if (likely(tcb->flags & TCPHDR_ACK))
 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
 
 	if (skb->len != tcp_header_size)
@@ -1023,7 +1023,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 
 	/* PSH and FIN should only be set in the second packet. */
 	flags = TCP_SKB_CB(skb)->flags;
-	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
 	TCP_SKB_CB(buff)->flags = flags;
 	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
 
@@ -1328,8 +1328,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
 	u32 in_flight, cwnd;
 
 	/* Don't be strict about the congestion window for the final FIN. */
-	if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
-	    tcp_skb_pcount(skb) == 1)
+	if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
 		return 1;
 
 	in_flight = tcp_packets_in_flight(tp);
@@ -1398,7 +1397,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
 	 * Nagle can be ignored during F-RTO too (see RFC4138).
 	 */
 	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
-	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
+	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
 		return 1;
 
 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@ -1487,7 +1486,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 
 	/* PSH and FIN should only be set in the second packet. */
 	flags = TCP_SKB_CB(skb)->flags;
-	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
 	TCP_SKB_CB(buff)->flags = flags;
 
 	/* This packet was never sent out yet, so no SACK bits. */
@@ -1518,7 +1517,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 send_win, cong_win, limit, in_flight;
 
-	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
+	if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
 		goto send_now;
 
 	if (icsk->icsk_ca_state != TCP_CA_Open)
@@ -1644,7 +1643,7 @@ static int tcp_mtu_probe(struct sock *sk)
 
 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
-	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
+	TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
 	TCP_SKB_CB(nskb)->sacked = 0;
 	nskb->csum = 0;
 	nskb->ip_summed = skb->ip_summed;
@@ -1669,7 +1668,7 @@ static int tcp_mtu_probe(struct sock *sk)
 			sk_wmem_free_skb(sk, skb);
 		} else {
 			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
-						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
+						   ~(TCPHDR_FIN|TCPHDR_PSH);
 			if (!skb_shinfo(skb)->nr_frags) {
 				skb_pull(skb, copy);
 				if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -2020,7 +2019,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 
 	if (!sysctl_tcp_retrans_collapse)
 		return;
-	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)
+	if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
 		return;
 
 	tcp_for_write_queue_from_safe(skb, tmp, sk) {
@@ -2112,7 +2111,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	 * since it is cheap to do so and saves bytes on the network.
 	 */
 	if (skb->len > 0 &&
-	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
 	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
 		if (!pskb_trim(skb, 0)) {
 			/* Reuse, even though it does some unnecessary work */
@@ -2301,7 +2300,7 @@ void tcp_send_fin(struct sock *sk)
 	mss_now = tcp_current_mss(sk);
 
 	if (tcp_send_head(sk) != NULL) {
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
 		TCP_SKB_CB(skb)->end_seq++;
 		tp->write_seq++;
 	} else {
@@ -2318,7 +2317,7 @@ void tcp_send_fin(struct sock *sk)
 		skb_reserve(skb, MAX_TCP_HEADER);
 		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
 		tcp_init_nondata_skb(skb, tp->write_seq,
-				     TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
+				     TCPHDR_ACK | TCPHDR_FIN);
 		tcp_queue_skb(sk, skb);
 	}
 	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
@@ -2343,7 +2342,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	/* Reserve space for headers and prepare control bits. */
 	skb_reserve(skb, MAX_TCP_HEADER);
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
-			     TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
+			     TCPHDR_ACK | TCPHDR_RST);
 	/* Send it off. */
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	if (tcp_transmit_skb(sk, skb, 0, priority))
@@ -2363,11 +2362,11 @@ int tcp_send_synack(struct sock *sk)
 	struct sk_buff *skb;
 
 	skb = tcp_write_queue_head(sk);
-	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
+	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
 		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
 		return -EFAULT;
 	}
-	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
+	if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
 		if (skb_cloned(skb)) {
 			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
 			if (nskb == NULL)
@@ -2381,7 +2380,7 @@ int tcp_send_synack(struct sock *sk)
 			skb = nskb;
 		}
 
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
 		TCP_ECN_send_synack(tcp_sk(sk), skb);
 	}
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2460,7 +2459,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	 * not even correctly set)
 	 */
 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
-			     TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
+			     TCPHDR_SYN | TCPHDR_ACK);
 
 	if (OPTION_COOKIE_EXTENSION & opts.options) {
 		if (s_data_desired) {
@@ -2592,7 +2591,7 @@ int tcp_connect(struct sock *sk)
 	skb_reserve(buff, MAX_TCP_HEADER);
 
 	tp->snd_nxt = tp->write_seq;
-	tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN);
+	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
 	TCP_ECN_send_syn(sk, buff);
 
 	/* Send it off. */
@@ -2698,7 +2697,7 @@ void tcp_send_ack(struct sock *sk)
 
 	/* Reserve space for headers and prepare control bits. */
 	skb_reserve(buff, MAX_TCP_HEADER);
-	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK);
+	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
 
 	/* Send it off, this clears delayed acks for us. */
 	TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2732,7 +2731,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 	 * end to send an ack. Don't queue or clone SKB, just
 	 * send it.
 	 */
-	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK);
+	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
@@ -2762,13 +2761,13 @@ int tcp_write_wakeup(struct sock *sk)
 		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
 		    skb->len > mss) {
 			seg_size = min(seg_size, mss);
-			TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+			TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
 			if (tcp_fragment(sk, skb, seg_size, mss))
 				return -1;
 		} else if (!tcp_skb_pcount(skb))
 			tcp_set_skb_tso_segs(sk, skb, mss);
 
-		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
+		TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
 		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 		if (!err)