@@ -474,7 +474,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	 * LATER: this step can be merged to real generation of fragments,
 	 * we can switch to copy when see the first bad fragment.
 	 */
-	if (skb_shinfo(skb)->frag_list) {
+	if (skb_has_frags(skb)) {
 		struct sk_buff *frag;
 		int first_len = skb_pagelen(skb);
 		int truesizes = 0;
@@ -485,7 +485,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 		    skb_cloned(skb))
 			goto slow_path;
 
-		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
+		skb_walk_frags(skb, frag) {
 			/* Correct geometry. */
 			if (frag->len > mtu ||
 			    ((frag->len & 7) && frag->next) ||
@@ -510,7 +510,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 		err = 0;
 		offset = 0;
 		frag = skb_shinfo(skb)->frag_list;
-		skb_shinfo(skb)->frag_list = NULL;
+		skb_frag_list_init(skb);
 		skb->data_len = first_len - skb_headlen(skb);
 		skb->truesize -= truesizes;
 		skb->len = first_len;
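For context, the hunks above replace open-coded accesses to skb_shinfo(skb)->frag_list with the frag-list abstraction helpers from include/linux/skbuff.h. The following is only a sketch of what those helpers expand to, paraphrased from the skbuff.h of that era rather than taken from this patch, so exact signatures and return types may differ slightly:

/* Sketch of the frag-list helpers the patch switches to (paraphrased,
 * not part of this diff): test, iterate, and clear skb->frag_list
 * without touching struct skb_shared_info directly.
 */
static inline bool skb_has_frags(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

/* Walk every sk_buff chained on skb's frag_list. */
#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

Behavior is unchanged; the helpers merely hide the frag_list field so callers such as ip_fragment() no longer depend on the layout of struct skb_shared_info.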