@@ -816,27 +816,24 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
 	if (ctx->mss) {	/* TSO */
 		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
-		ctx->l4_hdr_size = ((struct tcphdr *)
-				   skb_transport_header(skb))->doff * 4;
+		ctx->l4_hdr_size = tcp_hdrlen(skb);
 		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
 
 			if (ctx->ipv4) {
-				struct iphdr *iph = (struct iphdr *)
-						    skb_network_header(skb);
+				const struct iphdr *iph = ip_hdr(skb);
+
 				if (iph->protocol == IPPROTO_TCP)
-					ctx->l4_hdr_size = ((struct tcphdr *)
-					   skb_transport_header(skb))->doff * 4;
+					ctx->l4_hdr_size = tcp_hdrlen(skb);
 				else if (iph->protocol == IPPROTO_UDP)
 					/*
 					 * Use tcp header size so that bytes to
 					 * be copied are more than required by
 					 * the device.
 					 */
-					ctx->l4_hdr_size =
-							sizeof(struct tcphdr);
+					ctx->l4_hdr_size = sizeof(struct tcphdr);
 				else
 					ctx->l4_hdr_size = 0;
 			} else {
@@ -881,14 +878,17 @@ static void
 vmxnet3_prepare_tso(struct sk_buff *skb,
 		    struct vmxnet3_tx_ctx *ctx)
 {
-	struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
+	struct tcphdr *tcph = tcp_hdr(skb);
+
 	if (ctx->ipv4) {
-		struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
+		struct iphdr *iph = ip_hdr(skb);
+
 		iph->check = 0;
 		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
 						 IPPROTO_TCP, 0);
 	} else {
-		struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
+		struct ipv6hdr *iph = ipv6_hdr(skb);
+
 		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
 					       IPPROTO_TCP, 0);
 	}
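
For reference, the common helpers this patch switches to are thin inline
wrappers around the raw skb header pointers. A sketch, paraphrasing the
definitions in include/linux/tcp.h, include/linux/ip.h and
include/linux/ipv6.h rather than quoting them verbatim:

	#include <linux/ip.h>
	#include <linux/ipv6.h>
	#include <linux/skbuff.h>
	#include <linux/tcp.h>

	static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
	{
		return (struct tcphdr *)skb_transport_header(skb);
	}

	static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
	{
		/* doff counts 32-bit words, so *4 gives the header length
		 * in bytes -- the same value the open-coded casts computed.
		 */
		return tcp_hdr(skb)->doff * 4;
	}

	static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
	{
		return (struct iphdr *)skb_network_header(skb);
	}

	static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb)
	{
		return (struct ipv6hdr *)skb_network_header(skb);
	}

Since each helper expands to the same cast the driver previously open-coded,
the conversion is behaviour-preserving: it only improves readability, and
lets the iphdr pointer in the parse path become const where it is read-only.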