@@ -60,15 +60,6 @@ MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
 
-static int lro;
-module_param(lro, bool, 0444);
-MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");
-
-static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
-module_param(lro_max_aggr, int, 0644);
-MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
-		"(default = 64)");
-
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 int ipoib_debug_level;
 
@@ -976,54 +967,6 @@ static const struct header_ops ipoib_header_ops = {
 	.create = ipoib_hard_header,
 };
 
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
-		       void **tcph, u64 *hdr_flags, void *priv)
-{
-	unsigned int ip_len;
-	struct iphdr *iph;
-
-	if (unlikely(skb->protocol != htons(ETH_P_IP)))
-		return -1;
-
-	/*
-	 * In the future we may add an else clause that verifies the
-	 * checksum and allows devices which do not calculate checksum
-	 * to use LRO.
-	 */
-	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
-		return -1;
-
-	/* Check for non-TCP packet */
-	skb_reset_network_header(skb);
-	iph = ip_hdr(skb);
-	if (iph->protocol != IPPROTO_TCP)
-		return -1;
-
-	ip_len = ip_hdrlen(skb);
-	skb_set_transport_header(skb, ip_len);
-	*tcph = tcp_hdr(skb);
-
-	/* check if IP header and TCP header are complete */
-	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
-		return -1;
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	*iphdr = iph;
-
-	return 0;
-}
-
-static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
-{
-	priv->lro.lro_mgr.max_aggr = lro_max_aggr;
-	priv->lro.lro_mgr.max_desc = IPOIB_MAX_LRO_DESCRIPTORS;
-	priv->lro.lro_mgr.lro_arr = priv->lro.lro_desc;
-	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
-	priv->lro.lro_mgr.features = LRO_F_NAPI;
-	priv->lro.lro_mgr.dev = priv->dev;
-	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-}
-
 static const struct net_device_ops ipoib_netdev_ops = {
 	.ndo_open = ipoib_open,
 	.ndo_stop = ipoib_stop,
@@ -1067,8 +1010,6 @@ static void ipoib_setup(struct net_device *dev)
 
 	priv->dev = dev;
 
-	ipoib_lro_setup(priv);
-
 	spin_lock_init(&priv->lock);
 
 	mutex_init(&priv->vlan_mutex);
@@ -1218,9 +1159,6 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
 		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 	}
 
-	if (lro)
-		priv->dev->features |= NETIF_F_LRO;
-
 	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
 		priv->dev->features |= NETIF_F_TSO;
 