@@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 	gfp_t gfp_mask;
 	long timeo;
 	int err;
+	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	err = -EMSGSIZE;
+	if (npages > MAX_SKB_FRAGS)
+		goto failure;
 
 	gfp_mask = sk->sk_allocation;
 	if (gfp_mask & __GFP_WAIT)
@@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 	if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 		skb = alloc_skb(header_len, gfp_mask);
 		if (skb) {
-			int npages;
 			int i;
 
 			/* No pages, we're done... */
 			if (!data_len)
 				break;
 
-			npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 			skb->truesize += data_len;
 			skb_shinfo(skb)->nr_frags = npages;
 			for (i = 0; i < npages; i++) {
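
The effect of the first hunk is that the page-count for the requested data_len is computed and checked against MAX_SKB_FRAGS before any skb is allocated, so oversized requests fail early with -EMSGSIZE instead of reaching the frag-filling loop. The userspace sketch below exercises just that rounding and bound check in isolation; it is an approximation, assuming a 4 KiB page size and MAX_SKB_FRAGS of 17 as stand-in constants rather than the kernel's own definitions.

	#include <stdio.h>

	/*
	 * Stand-in constants for illustration only (typical 4 KiB-page
	 * configuration); not the kernel headers.
	 */
	#define PAGE_SHIFT    12
	#define PAGE_SIZE     (1UL << PAGE_SHIFT)
	#define MAX_SKB_FRAGS 17UL

	int main(void)
	{
		unsigned long data_len = 80000;	/* example payload size in bytes */

		/* Round data_len up to whole pages, as the patch does. */
		unsigned long npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

		if (npages > MAX_SKB_FRAGS)
			printf("reject with -EMSGSIZE: %lu pages > %lu frags\n",
			       npages, MAX_SKB_FRAGS);
		else
			printf("ok: %lu pages fit in %lu frags\n",
			       npages, MAX_SKB_FRAGS);
		return 0;
	}

With the example value of 80000 bytes, npages rounds up to 20, which exceeds the assumed 17-frag limit and would be rejected up front; in the pre-patch code that check only happened implicitly, after allocation, via the frag array size.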