@@ -73,17 +73,13 @@ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
 				  struct pipe_buffer *buf)
 {
-	struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-	kfree_skb(skb);
+	put_page(buf->page);
 }
 
 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
 				struct pipe_buffer *buf)
 {
-	struct sk_buff *skb = (struct sk_buff *) buf->private;
-
-	skb_get(skb);
+	get_page(buf->page);
 }
 
 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
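
These two callbacks are the release/get pair of the socket pipe-buffer operations: after this change a pipe buffer pins a plain page rather than a whole sk_buff, so releasing and duplicating a buffer reduces to put_page()/get_page(). For orientation, the ops table they plug into looks roughly like this in kernels of this vintage (illustrative sketch, not part of the patch):

	static struct pipe_buf_operations sock_pipe_buf_ops = {
		.can_merge = 0,
		.map = generic_pipe_buf_map,
		.unmap = generic_pipe_buf_unmap,
		.confirm = generic_pipe_buf_confirm,
		.release = sock_pipe_buf_release,	/* now a bare put_page() */
		.steal = sock_pipe_buf_steal,
		.get = sock_pipe_buf_get,		/* now a bare get_page() */
	};
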
@@ -1334,9 +1330,19 @@ fault:
  */
 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 {
-	struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;
+	put_page(spd->pages[i]);
+}
 
-	kfree_skb(skb);
+static inline struct page *linear_to_page(struct page *page, unsigned int len,
+					  unsigned int offset)
+{
+	struct page *p = alloc_pages(GFP_KERNEL, 0);
+
+	if (!p)
+		return NULL;
+	memcpy(page_address(p) + offset, page_address(page) + offset, len);
+
+	return p;
 }
 
 /*
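
The new linear_to_page() is the core of the fix. The old code handed the pipe page references obtained via virt_to_page(skb->data), but the linear area lives in slab memory: once the skb is freed, that memory can be recycled while the pipe still points at it, corrupting or leaking spliced data. Copying the chunk into a freshly allocated order-0 page gives the pipe a page it owns outright, and the copy lands at the same in-page offset, so the offset recorded in the pipe buffer stays valid. A minimal userspace sketch of the path this repairs, splicing from a connected TCP socket into a pipe (descriptors assumed set up, error handling trimmed):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <unistd.h>

	/* Move up to len bytes from a connected socket into pipe_wr.
	 * In-kernel this reaches skb_splice_bits(): linear skb data
	 * is now copied into private pages, paged frags are shared
	 * via get_page().
	 */
	static ssize_t sock_to_pipe(int sockfd, int pipe_wr, size_t len)
	{
		return splice(sockfd, NULL, pipe_wr, NULL, len, SPLICE_F_MOVE);
	}
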
@@ -1344,16 +1350,23 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
 				unsigned int len, unsigned int offset,
-				struct sk_buff *skb)
+				struct sk_buff *skb, int linear)
 {
 	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
 		return 1;
 
+	if (linear) {
+		page = linear_to_page(page, len, offset);
+		if (!page)
+			return 1;
+	} else
+		get_page(page);
+
 	spd->pages[spd->nr_pages] = page;
 	spd->partial[spd->nr_pages].len = len;
 	spd->partial[spd->nr_pages].offset = offset;
-	spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
 	spd->nr_pages++;
+
 	return 0;
 }
 
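
spd_fill_page() now centralizes ownership: a linear chunk is privatized through linear_to_page(), a frag page gets an extra reference, and partial[].private no longer smuggles a cloned skb, which is what lets the release paths above shrink to a bare put_page(). A condensed restatement of the rule (illustrative helper, not kernel code):

	/* Either branch leaves the pipe holding exactly one page
	 * reference, dropped later by sock_pipe_buf_release().
	 */
	static struct page *splice_ref_page(struct page *page, unsigned int len,
					    unsigned int offset, int linear)
	{
		if (linear)
			return linear_to_page(page, len, offset); /* private copy */
		get_page(page);					  /* shared ref */
		return page;
	}
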
@@ -1369,7 +1382,7 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
 static inline int __splice_segment(struct page *page, unsigned int poff,
 				   unsigned int plen, unsigned int *off,
 				   unsigned int *len, struct sk_buff *skb,
-				   struct splice_pipe_desc *spd)
+				   struct splice_pipe_desc *spd, int linear)
 {
 	if (!*len)
 		return 1;
@@ -1392,7 +1405,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
 		/* the linear region may spread across several pages */
 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-		if (spd_fill_page(spd, page, flen, poff, skb))
+		if (spd_fill_page(spd, page, flen, poff, skb, linear))
 			return 1;
 
 		__segment_seek(&page, &poff, &plen, flen);
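
This clamp is also what keeps linear_to_page() safe: flen is cut at the page boundary, so offset + len never exceeds PAGE_SIZE and the memcpy into the fresh page cannot overrun it. A standalone check of the invariant (plain C, hypothetical page size):

	#include <assert.h>

	#define PAGE_SIZE 4096u	/* hypothetical, for illustration */

	/* Mirrors the min_t() clamp above: a chunk never crosses a page. */
	static unsigned int clamp_to_page(unsigned int poff, unsigned int flen)
	{
		return flen < PAGE_SIZE - poff ? flen : PAGE_SIZE - poff;
	}

	int main(void)
	{
		unsigned int poff = 3000, flen = 2000;

		flen = clamp_to_page(poff, flen);
		assert(poff + flen <= PAGE_SIZE);	/* 3000 + 1096 == 4096 */
		return 0;
	}
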
@@ -1419,7 +1432,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 	if (__splice_segment(virt_to_page(skb->data),
 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
 			     skb_headlen(skb),
-			     offset, len, skb, spd))
+			     offset, len, skb, spd, 1))
 		return 1;
 
 	/*
@@ -1429,7 +1442,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
 		if (__splice_segment(f->page, f->page_offset, f->size,
-				     offset, len, skb, spd))
+				     offset, len, skb, spd, 0))
 			return 1;
 	}
 
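
The two call sites encode the distinction directly: the linear head at skb->data is slab-backed, so it passes linear=1 and gets copied, while real frag pages pass linear=0 and are merely pinned. Roughly, for a received skb (illustrative kernel-style helper):

	#include <linux/skbuff.h>

	/* Where splice finds an skb's payload (illustration only):
	 * skb_headlen() bytes in the slab-backed linear head, spliced
	 * with linear=1 (copied); skb_shinfo(skb)->nr_frags page
	 * fragments, spliced with linear=0 (get_page() only).
	 */
	static unsigned int splice_copied_bytes(const struct sk_buff *skb)
	{
		return skb_headlen(skb);	/* only the head is memcpy'd */
	}
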
@@ -1442,7 +1455,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
  * the frag list, if such a thing exists. We'd probably need to recurse to
  * handle that cleanly.
  */
-int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		    struct pipe_inode_info *pipe, unsigned int tlen,
 		    unsigned int flags)
 {
@@ -1455,16 +1468,6 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
 		.ops = &sock_pipe_buf_ops,
 		.spd_release = sock_spd_release,
 	};
-	struct sk_buff *skb;
-
-	/*
-	 * I'd love to avoid the clone here, but tcp_read_sock()
-	 * ignores reference counts and unconditonally kills the sk_buff
-	 * on return from the actor.
-	 */
-	skb = skb_clone(__skb, GFP_KERNEL);
-	if (unlikely(!skb))
-		return -ENOMEM;
 
 	/*
 	 * __skb_splice_bits() only fails if the output has no room left,
@@ -1488,15 +1491,9 @@ int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
 	}
 
 done:
-	/*
-	 * drop our reference to the clone, the pipe consumption will
-	 * drop the rest.
-	 */
-	kfree_skb(skb);
-
 	if (spd.nr_pages) {
+		struct sock *sk = skb->sk;
 		int ret;
-		struct sock *sk = __skb->sk;
 
 		/*
 		 * Drop the socket lock, otherwise we have reverse
|