@@ -206,7 +206,7 @@ static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
 	tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
 }
 
-static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
 {
 	if (tcp_hdr(skb)->cwr)
 		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
@@ -239,19 +239,19 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *s
 	}
 }
 
-static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
+static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
 		tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
-static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
+static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
 		tp->ecn_flags &= ~TCP_ECN_OK;
 }
 
-static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
+static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
 {
 	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
 		return 1;
@@ -315,7 +315,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 	return 0;
 }
 
-static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
+static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -429,7 +429,7 @@ static void tcp_clamp_window(struct sock *sk)
  */
 void tcp_initialize_rcv_mss(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
 
 	hint = min(hint, tp->rcv_wnd / 2);
@@ -824,7 +824,7 @@ void tcp_update_metrics(struct sock *sk)
 	}
 }
 
-__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
+__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
 {
 	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
 
@@ -1216,7 +1216,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
 	tp->lost_retrans_low = new_low_seq;
 }
 
-static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
+static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 			   struct tcp_sack_block_wire *sp, int num_sacks,
 			   u32 prior_snd_una)
 {
@@ -1310,7 +1310,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 	return in_sack;
 }
 
-static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
+static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 			  struct tcp_sacktag_state *state,
 			  int dup_sack, int pcount)
 {
@@ -1465,13 +1465,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 /* I wish gso_size would have a bit more sane initialization than
  * something-or-zero which complicates things
  */
-static int tcp_skb_seglen(struct sk_buff *skb)
+static int tcp_skb_seglen(const struct sk_buff *skb)
 {
 	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
 }
 
 /* Shifting pages past head area doesn't work */
-static int skb_can_shift(struct sk_buff *skb)
+static int skb_can_shift(const struct sk_buff *skb)
 {
 	return !skb_headlen(skb) && skb_is_nonlinear(skb);
 }
@@ -1720,19 +1720,19 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
 	return skb;
 }
 
-static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
+static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
 {
 	return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
 }
 
 static int
-tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
+tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 			u32 prior_snd_una)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	unsigned char *ptr = (skb_transport_header(ack_skb) +
-			      TCP_SKB_CB(ack_skb)->sacked);
+	const unsigned char *ptr = (skb_transport_header(ack_skb) +
+				    TCP_SKB_CB(ack_skb)->sacked);
 	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
 	struct tcp_sack_block sp[TCP_NUM_SACKS];
 	struct tcp_sack_block *cache;
@@ -2296,7 +2296,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
 	return 0;
 }
 
-static inline int tcp_fackets_out(struct tcp_sock *tp)
+static inline int tcp_fackets_out(const struct tcp_sock *tp)
 {
 	return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
 }
@@ -2316,19 +2316,20 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
  * they differ. Since neither occurs due to loss, TCP should really
  * ignore them.
  */
-static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
+static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
 {
 	return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
 }
 
-static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_skb_timedout(const struct sock *sk,
+				   const struct sk_buff *skb)
 {
 	return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
 }
 
-static inline int tcp_head_timedout(struct sock *sk)
+static inline int tcp_head_timedout(const struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	return tp->packets_out &&
 	       tcp_skb_timedout(sk, tcp_write_queue_head(sk));
@@ -2639,7 +2640,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
 /* Nothing was retransmitted or returned timestamp is less
  * than timestamp of the first retransmission.
  */
-static inline int tcp_packet_delayed(struct tcp_sock *tp)
+static inline int tcp_packet_delayed(const struct tcp_sock *tp)
 {
 	return !tp->retrans_stamp ||
 	       (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2700,7 +2701,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static inline int tcp_may_undo(struct tcp_sock *tp)
+static inline int tcp_may_undo(const struct tcp_sock *tp)
 {
 	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
 }
@@ -2764,9 +2765,9 @@ static void tcp_try_undo_dsack(struct sock *sk)
  * that successive retransmissions of a segment must not advance
  * retrans_stamp under any conditions.
  */
-static int tcp_any_retrans_done(struct sock *sk)
+static int tcp_any_retrans_done(const struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 
 	if (tp->retrans_out)
@@ -3245,7 +3246,7 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
  */
 static void tcp_rearm_rto(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	if (!tp->packets_out) {
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
@@ -3497,7 +3498,7 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp,
  * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
  * and in FreeBSD. NetBSD's one is even worse.) is wrong.
  */
-static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
+static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
 				 u32 ack_seq)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -3673,7 +3674,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 }
 
 /* This routine deals with incoming acks, but not outgoing ones. */
-static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
+static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -3810,14 +3811,14 @@ old_ack:
 * But, this can also be called on packets in the established flow when
 * the fast version below fails.
 */
-void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
-		       u8 **hvpp, int estab)
+void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
+		       const u8 **hvpp, int estab)
 {
-	unsigned char *ptr;
-	struct tcphdr *th = tcp_hdr(skb);
+	const unsigned char *ptr;
+	const struct tcphdr *th = tcp_hdr(skb);
 	int length = (th->doff * 4) - sizeof(struct tcphdr);
 
-	ptr = (unsigned char *)(th + 1);
+	ptr = (const unsigned char *)(th + 1);
 	opt_rx->saw_tstamp = 0;
 
 	while (length > 0) {
@@ -3928,9 +3929,9 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
 }
 EXPORT_SYMBOL(tcp_parse_options);
 
-static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
+static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
 {
-	__be32 *ptr = (__be32 *)(th + 1);
+	const __be32 *ptr = (const __be32 *)(th + 1);
 
 	if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
 			  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
@@ -3947,8 +3948,9 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
 /* Fast parse options. This hopes to only see timestamps.
  * If it is wrong it falls back on tcp_parse_options().
  */
-static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
-				  struct tcp_sock *tp, u8 **hvpp)
+static int tcp_fast_parse_options(const struct sk_buff *skb,
+				  const struct tcphdr *th,
+				  struct tcp_sock *tp, const u8 **hvpp)
 {
 	/* In the spirit of fast parsing, compare doff directly to constant
 	 * values. Because equality is used, short doff can be ignored here.
@@ -3969,10 +3971,10 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
 /*
  * Parse MD5 Signature option
  */
-u8 *tcp_parse_md5sig_option(struct tcphdr *th)
+const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
 {
-	int length = (th->doff << 2) - sizeof (*th);
-	u8 *ptr = (u8*)(th + 1);
+	int length = (th->doff << 2) - sizeof(*th);
+	const u8 *ptr = (const u8 *)(th + 1);
 
 	/* If the TCP option is too short, we can short cut */
 	if (length < TCPOLEN_MD5SIG)
@@ -4049,8 +4051,8 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 
 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcphdr *th = tcp_hdr(skb);
 	u32 seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 
@@ -4089,7 +4091,7 @@ static inline int tcp_paws_discard(const struct sock *sk,
  * (borrowed from freebsd)
  */
 
-static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
 	return !before(end_seq, tp->rcv_wup) &&
 		!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
@@ -4246,7 +4248,7 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
 		tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
 }
 
-static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
+static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -4433,7 +4435,7 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcphdr *th = tcp_hdr(skb);
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int eaten = -1;
 
@@ -4917,9 +4919,9 @@ void tcp_cwnd_application_limited(struct sock *sk)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static int tcp_should_expand_sndbuf(struct sock *sk)
+static int tcp_should_expand_sndbuf(const struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	/* If the user specified a specific send buffer setting, do
 	 * not modify it.
@@ -5028,7 +5030,7 @@ static inline void tcp_ack_snd_check(struct sock *sk)
  * either form (or just set the sysctl tcp_stdurg).
  */
 
-static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
+static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 ptr = ntohs(th->urg_ptr);
@@ -5094,7 +5096,7 @@ static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
 }
 
 /* This is the 'fast' part of urgent handling. */
-static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
+static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -5215,9 +5217,9 @@ out:
 * play significant role here.
 */
 static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
-				 struct tcphdr *th, int syn_inerr)
+				 const struct tcphdr *th, int syn_inerr)
 {
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	/* RFC1323: H1. Apply PAWS check first. */
@@ -5298,7 +5300,7 @@ discard:
 * tcp_data_queue when everything is OK.
 */
 int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-			struct tcphdr *th, unsigned len)
+			const struct tcphdr *th, unsigned int len)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int res;
@@ -5509,9 +5511,9 @@ discard:
 EXPORT_SYMBOL(tcp_rcv_established);
 
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
-					 struct tcphdr *th, unsigned len)
+					 const struct tcphdr *th, unsigned int len)
 {
-	u8 *hash_location;
+	const u8 *hash_location;
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_cookie_values *cvp = tp->cookie_values;
@@ -5786,7 +5788,7 @@ reset_and_undo:
 */
 
 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-			  struct tcphdr *th, unsigned len)
+			  const struct tcphdr *th, unsigned int len)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
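
Every hunk above applies the same mechanical change: pointer parameters that a function only reads gain a const qualifier (and the few locals derived from them, such as the option-parsing ptr, become const too), so the compiler rejects accidental writes and the read-only contract is visible in the signature. A minimal userspace sketch of the pattern follows; the conn/hdr types and flag names are hypothetical stand-ins for tcp_sock/tcphdr, not kernel code.

#include <stdio.h>

struct hdr  { unsigned char ece, cwr; };	/* stand-in for struct tcphdr */
struct conn { int ecn_flags; };			/* stand-in for struct tcp_sock */

#define FLAG_ECN_OK	0x1
#define FLAG_DEMAND_CWR	0x2

/* Pure reader: both pointers are const, analogous to TCP_ECN_rcv_ecn_echo()
 * above, which inspects the header and flags but modifies neither. */
static int ecn_echo_seen(const struct conn *c, const struct hdr *th)
{
	return th->ece && (c->ecn_flags & FLAG_ECN_OK);
}

/* Mixed case: the connection stays mutable because its flags are updated,
 * but the header is still only read, analogous to TCP_ECN_accept_cwr(). */
static void accept_cwr(struct conn *c, const struct hdr *th)
{
	if (th->cwr)
		c->ecn_flags &= ~FLAG_DEMAND_CWR;
	/* th->cwr = 0; would now fail to compile: th points to const data */
}

int main(void)
{
	struct conn c = { .ecn_flags = FLAG_ECN_OK | FLAG_DEMAND_CWR };
	struct hdr h = { .ece = 1, .cwr = 1 };

	printf("ecn echo seen: %d\n", ecn_echo_seen(&c, &h));
	accept_cwr(&c, &h);
	printf("ecn_flags after CWR: %#x\n", c.ecn_flags);
	return 0;
}

The asymmetry is the point of the patch: constness is decided per parameter, not per function, which is why tcp_parse_aligned_timestamp() above keeps a mutable tp (it stores parsed timestamps into it) while its th becomes const.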