@@ -41,6 +41,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/kernel.h>
 
 #include <linux/if_arp.h>	/* For ARPHRD_xxx */
 
@@ -53,6 +54,14 @@ MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
 MODULE_LICENSE("Dual BSD/GPL");
 
+int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
+int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
+
+module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
+MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
+module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
+MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
+
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 int ipoib_debug_level;
 
@@ -252,8 +261,8 @@ static void path_free(struct net_device *dev, struct ipoib_path *path)
 		 */
 		if (neigh->ah)
 			ipoib_put_ah(neigh->ah);
-		*to_ipoib_neigh(neigh->neighbour) = NULL;
-		kfree(neigh);
+
+		ipoib_neigh_free(neigh);
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -327,9 +336,8 @@ void ipoib_flush_paths(struct net_device *dev)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path, *tp;
 	LIST_HEAD(remove_list);
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irq(&priv->lock);
 
 	list_splice(&priv->path_list, &remove_list);
 	INIT_LIST_HEAD(&priv->path_list);
@@ -337,14 +345,15 @@ void ipoib_flush_paths(struct net_device *dev)
 	list_for_each_entry(path, &remove_list, list)
 		rb_erase(&path->rb_node, &priv->path_tree);
 
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 	list_for_each_entry_safe(path, tp, &remove_list, list) {
 		if (path->query)
 			ib_sa_cancel_query(path->query_id, path->query);
+		spin_unlock_irq(&priv->lock);
 		wait_for_completion(&path->done);
 		path_free(dev, path);
+		spin_lock_irq(&priv->lock);
 	}
+	spin_unlock_irq(&priv->lock);
 }
 
 static void path_rec_completion(int status,
@@ -373,16 +382,9 @@ static void path_rec_completion(int status,
 		struct ib_ah_attr av = {
 			.dlid = be16_to_cpu(pathrec->dlid),
 			.sl = pathrec->sl,
-			.port_num = priv->port
+			.port_num = priv->port,
+			.static_rate = pathrec->rate
 		};
-		int path_rate = ib_sa_rate_enum_to_int(pathrec->rate);
-
-		if (path_rate > 0 && priv->local_rate > path_rate)
-			av.static_rate = (priv->local_rate - 1) / path_rate;
-
-		ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
-			  av.static_rate, priv->local_rate,
-			  ib_sa_rate_enum_to_int(pathrec->rate));
 
 		ah = ipoib_create_ah(dev, priv->pd, &av);
 	}
@@ -481,7 +483,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 	struct ipoib_path *path;
 	struct ipoib_neigh *neigh;
 
-	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
+	neigh = ipoib_neigh_alloc(skb->dst->neighbour);
 	if (!neigh) {
 		++priv->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
@@ -489,8 +491,6 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	skb_queue_head_init(&neigh->queue);
-	neigh->neighbour = skb->dst->neighbour;
-	*to_ipoib_neigh(skb->dst->neighbour) = neigh;
 
 	/*
 	 * We can only be called from ipoib_start_xmit, so we're
@@ -503,7 +503,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 		path = path_rec_create(dev,
 				       (union ib_gid *) (skb->dst->neighbour->ha + 4));
 		if (!path)
-			goto err;
+			goto err_path;
 
 		__path_add(dev, path);
 	}
@@ -521,17 +521,17 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
 		__skb_queue_tail(&neigh->queue, skb);
 
 		if (!path->query && path_rec_start(dev, path))
-			goto err;
+			goto err_list;
 	}
 
 	spin_unlock(&priv->lock);
 	return;
 
-err:
-	*to_ipoib_neigh(skb->dst->neighbour) = NULL;
+err_list:
 	list_del(&neigh->list);
-	kfree(neigh);
 
+err_path:
+	ipoib_neigh_free(neigh);
 	++priv->stats.tx_dropped;
 	dev_kfree_skb_any(skb);
 
@@ -763,8 +763,7 @@ static void ipoib_neigh_destructor(struct neighbour *n)
 		if (neigh->ah)
 			ah = neigh->ah;
 		list_del(&neigh->list);
-		*to_ipoib_neigh(n) = NULL;
-		kfree(neigh);
+		ipoib_neigh_free(neigh);
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -773,6 +772,26 @@ static void ipoib_neigh_destructor(struct neighbour *n)
 	ipoib_put_ah(ah);
 }
 
+struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
+{
+	struct ipoib_neigh *neigh;
+
+	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
+	if (!neigh)
+		return NULL;
+
+	neigh->neighbour = neighbour;
+	*to_ipoib_neigh(neighbour) = neigh;
+
+	return neigh;
+}
+
+void ipoib_neigh_free(struct ipoib_neigh *neigh)
+{
+	*to_ipoib_neigh(neigh->neighbour) = NULL;
+	kfree(neigh);
+}
+
 static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
 {
 	parms->neigh_destructor = ipoib_neigh_destructor;
@@ -785,20 +804,19 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
 	/* Allocate RX/TX "rings" to hold queued skbs */
-
-	priv->rx_ring = kzalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
+	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
 				GFP_KERNEL);
 	if (!priv->rx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
-		       ca->name, IPOIB_RX_RING_SIZE);
+		       ca->name, ipoib_recvq_size);
 		goto out;
 	}
 
-	priv->tx_ring = kzalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
+	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring,
 				GFP_KERNEL);
 	if (!priv->tx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
-		       ca->name, IPOIB_TX_RING_SIZE);
+		       ca->name, ipoib_sendq_size);
 		goto out_rx_ring_cleanup;
 	}
 
@@ -866,7 +884,7 @@ static void ipoib_setup(struct net_device *dev)
 	dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
 	dev->addr_len = INFINIBAND_ALEN;
 	dev->type = ARPHRD_INFINIBAND;
-	dev->tx_queue_len = IPOIB_TX_RING_SIZE * 2;
+	dev->tx_queue_len = ipoib_sendq_size * 2;
 	dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
 
 	/* MTU will be reset when mcast join happens */
@@ -1118,6 +1136,14 @@ static int __init ipoib_init_module(void)
 {
 	int ret;
 
+	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
+	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
+	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);
+
+	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
+	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
+	ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE);
+
 	ret = ipoib_register_debugfs();
 	if (ret)
 		return ret;