@@ -573,6 +573,99 @@ fault:
 }
 EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
 
+/**
+ * zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
+ * @skb: buffer to copy into
+ * @from: io vector to copy from
+ * @offset: offset in the io vector to start copying from
+ * @count: number of io vector entries to copy from
+ *
+ * The function first copies up to the skb headlen, then pins the
+ * remaining userspace pages and builds frags from them.
+ *
+ * Returns 0, -EFAULT or -EMSGSIZE.
+ * Note: the iovec is not modified during the copy.
+ */
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+			   int offset, size_t count)
+{
+	int len = iov_length(from, count) - offset;
+	int copy = skb_headlen(skb);
+	int size, offset1 = 0;
+	int i = 0;
+
+	/* Skip over from offset */
+	while (count && (offset >= from->iov_len)) {
+		offset -= from->iov_len;
+		++from;
+		--count;
+	}
+
+	/* copy up to skb headlen */
+	while (count && (copy > 0)) {
+		size = min_t(unsigned int, copy, from->iov_len - offset);
+		if (copy_from_user(skb->data + offset1, from->iov_base + offset,
+				   size))
+			return -EFAULT;
+		if (copy > size) {
+			++from;
+			--count;
+			offset = 0;
+		} else
+			offset += size;
+		copy -= size;
+		offset1 += size;
+	}
+
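+	/* everything fit in the linear area, nothing left to pin */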
+	if (len == offset1)
+		return 0;
+
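+	/* pin each remaining iovec segment and attach it as page frags */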
+	while (count--) {
+		struct page *page[MAX_SKB_FRAGS];
+		int num_pages;
+		unsigned long base;
+		unsigned long truesize;
+
+		len = from->iov_len - offset;
+		if (!len) {
+			offset = 0;
+			++from;
+			continue;
+		}
+		base = (unsigned long)from->iov_base + offset;
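+		/* number of pages spanned by [base, base + len) */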
+		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+		if (i + size > MAX_SKB_FRAGS)
+			return -EMSGSIZE;
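+		/* pin the user pages; on a partial pin, release what was
+		 * taken and report the fault to the caller
+		 */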
+		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+		if (num_pages != size) {
+			int j;
+
+			for (j = 0; j < num_pages; j++)
+				put_page(page[i + j]);
+			return -EFAULT;
+		}
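+		/* charge the fully pinned pages to the skb and its socket */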
+		truesize = size * PAGE_SIZE;
+		skb->data_len += len;
+		skb->len += len;
+		skb->truesize += truesize;
+		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+		while (len) {
+			int off = base & ~PAGE_MASK;
+			int size = min_t(int, len, PAGE_SIZE - off);
+			__skb_fill_page_desc(skb, i, page[i], off, size);
+			skb_shinfo(skb)->nr_frags++;
+			/* advance past the bytes just added as a frag */
+			base += size;
+			len -= size;
+			i++;
+		}
+		offset = 0;
+		++from;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(zerocopy_sg_from_iovec);
+
 static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 				      u8 __user *to, int len,
 				      __wsum *csump)
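
For context, a minimal sketch of how a caller might use the new helper, assuming it already owns an skb with skb->sk set and enough linear space reserved for the part it wants copied; build_zerocopy_payload is an illustrative name and not part of this patch:

/* Illustrative sketch only -- not part of the patch above. */
static int build_zerocopy_payload(struct sk_buff *skb,
				  const struct iovec *iov, size_t count)
{
	int err;

	/* zerocopy_sg_from_iovec() charges the pinned pages to
	 * skb->sk->sk_wmem_alloc, so the skb must be owned by a socket
	 */
	if (!skb->sk)
		return -EINVAL;

	/* copy up to skb_headlen(skb) bytes, pin the rest as page frags */
	err = zerocopy_sg_from_iovec(skb, iov, 0, count);
	if (err)
		return err;	/* -EFAULT or -EMSGSIZE */

	return 0;
}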