@@ -188,7 +188,6 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
 			},
 			.mark = skb->mark,
 		};
-		struct rtable *rt;
 
 		if (ip_route_output_key(net, &rt, &fl))
 			return 0;
@@ -408,7 +407,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+	    !skb_is_gso(skb)) {
 		ip_rt_put(rt);
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -461,7 +461,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		if (!skb->dev) {
 			struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -561,7 +561,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+	    !skb_is_gso(skb)) {
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
 		IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
				 "ip_vs_nat_xmit(): frag needed for");
@@ -676,7 +677,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		if (!skb->dev) {
 			struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -791,8 +792,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	df |= (old_iph->frag_off & htons(IP_DF));
 
-	if ((old_iph->frag_off & htons(IP_DF))
-	    && mtu < ntohs(old_iph->tot_len)) {
+	if ((old_iph->frag_off & htons(IP_DF)) &&
+	    mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error_put;
@@ -904,7 +905,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	if (skb_dst(skb))
 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
 
-	if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
+	if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
+	    !skb_is_gso(skb)) {
 		if (!skb->dev) {
 			struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -1009,7 +1011,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
+	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
+	    !skb_is_gso(skb)) {
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
 		ip_rt_put(rt);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -1176,7 +1179,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
+	if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
+	    !skb_is_gso(skb)) {
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error_put;
@@ -1290,7 +1294,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		if (!skb->dev) {
 			struct net *net = dev_net(skb_dst(skb)->dev);
 
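
Every hunk above adds the same guard: the ICMP "fragmentation needed" / ICMPv6 "packet too big" path is skipped when the skb is a GSO aggregate, because GSO skbs are segmented back down to gso_size before they hit the wire and so never exceed the path MTU themselves. A minimal sketch of the recurring IPv4 condition, factored into a helper for illustration only (the helper name ipvs_frag_needed() is hypothetical and not part of the patch; skb_is_gso(), dst_mtu() and ip_hdr() are the existing kernel helpers used above):

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/dst.h>

/* Sketch only: mirrors the check added in the IPv4 hunks above.
 * True when the packet exceeds the route MTU, carries DF, and is
 * not GSO (GSO skbs are resegmented later, so sending
 * ICMP_FRAG_NEEDED for them would be a false positive).
 */
static bool ipvs_frag_needed(const struct sk_buff *skb,
			     const struct dst_entry *dst)
{
	return skb->len > dst_mtu(dst) &&
	       (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
	       !skb_is_gso(skb);
}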