@@ -986,6 +986,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
 				 unsigned int mss_now)
 {
+	/* Make sure we own this skb before messing gso_size/gso_segs */
+	WARN_ON_ONCE(skb_cloned(skb));
+
 	if (skb->len <= mss_now || !sk_can_gso(sk) ||
 	    skb->ip_summed == CHECKSUM_NONE) {
 		/* Avoid the costly divide in the normal
@@ -1067,9 +1070,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	if (nsize < 0)
 		nsize = 0;
 
-	if (skb_cloned(skb) &&
-	    skb_is_nonlinear(skb) &&
-	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+	if (skb_unclone(skb, GFP_ATOMIC))
 		return -ENOMEM;
 
 	/* Get a new skb... force flag on. */
@@ -2344,6 +2345,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		int oldpcount = tcp_skb_pcount(skb);
 
 		if (unlikely(oldpcount > 1)) {
+			if (skb_unclone(skb, GFP_ATOMIC))
+				return -ENOMEM;
 			tcp_init_tso_segs(sk, skb, cur_mss);
 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
 		}
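
For reference, the skb_unclone() helper used above lives in include/linux/skbuff.h;
a rough sketch of its behavior (exact might_sleep_if() form may differ by kernel
version) is:

	/* rough sketch of skb_unclone() from include/linux/skbuff.h */
	static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
	{
		might_sleep_if(pri & __GFP_WAIT);

		/* If the skb header is still shared with a clone, reallocate it
		 * (no extra head/tailroom) so gso_size/gso_segs can be written
		 * without affecting the other owner.
		 */
		if (skb_cloned(skb))
			return pskb_expand_head(skb, 0, 0, pri);

		return 0;
	}

Compared with the open-coded test removed from tcp_fragment(), this drops the
skb_is_nonlinear() condition, so cloned linear skbs are unshared as well before
their shared_info is modified.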