@@ -35,9 +35,13 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
 
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/cmd.h>
@@ -58,6 +62,15 @@ static const char mlx4_ib_version[] =
 	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
 	DRV_VERSION " (" DRV_RELDATE ")\n";
 
+struct update_gid_work {
+	struct work_struct	work;
+	union ib_gid		gids[128];
+	struct mlx4_ib_dev     *dev;
+	int			port;
+};
+
+static struct workqueue_struct *wq;
+
 static void init_query_mad(struct ib_smp *mad)
 {
 	mad->base_version = 1;
@@ -154,28 +167,19 @@ out:
 	return err;
 }
 
-static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
-			      struct ib_port_attr *props)
+static enum rdma_link_layer
+mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
 {
-	struct ib_smp *in_mad  = NULL;
-	struct ib_smp *out_mad = NULL;
-	int err = -ENOMEM;
+	struct mlx4_dev *dev = to_mdev(device)->dev;
 
-	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
-	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
-	if (!in_mad || !out_mad)
-		goto out;
-
-	memset(props, 0, sizeof *props);
-
-	init_query_mad(in_mad);
-	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
-	in_mad->attr_mod = cpu_to_be32(port);
-
-	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
-	if (err)
-		goto out;
+	return dev->caps.port_mask & (1 << (port_num - 1)) ?
+		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+}
 
+static int ib_link_query_port(struct ib_device *ibdev, u8 port,
+			      struct ib_port_attr *props,
+			      struct ib_smp *out_mad)
+{
 	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
 	props->lmc		= out_mad->data[34] & 0x7;
 	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
@@ -196,6 +200,80 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
 	props->max_vl_num	= out_mad->data[37] >> 4;
 	props->init_type_reply	= out_mad->data[41] >> 4;
 
+	return 0;
+}
+
+static u8 state_to_phys_state(enum ib_port_state state)
+{
+	return state == IB_PORT_ACTIVE ? 5 : 3;
+}
+
+static int eth_link_query_port(struct ib_device *ibdev, u8 port,
+			       struct ib_port_attr *props,
+			       struct ib_smp *out_mad)
+{
+	struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
+	struct net_device *ndev;
+	enum ib_mtu tmp;
+
+	props->active_width	= IB_WIDTH_4X;
+	props->active_speed	= 4;
+	props->port_cap_flags	= IB_PORT_CM_SUP;
+	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
+	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
+	props->pkey_tbl_len	= 1;
+	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
+	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
+	props->max_mtu		= IB_MTU_2048;
+	props->subnet_timeout	= 0;
+	props->max_vl_num	= out_mad->data[37] >> 4;
+	props->init_type_reply	= 0;
+	props->state		= IB_PORT_DOWN;
+	props->phys_state	= state_to_phys_state(props->state);
+	props->active_mtu	= IB_MTU_256;
+	spin_lock(&iboe->lock);
+	ndev = iboe->netdevs[port - 1];
+	if (!ndev)
+		goto out;
+
+	tmp = iboe_get_mtu(ndev->mtu);
+	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
+
+	props->state		= netif_running(ndev) && netif_oper_up(ndev) ?
+					IB_PORT_ACTIVE : IB_PORT_DOWN;
+	props->phys_state	= state_to_phys_state(props->state);
+
+out:
+	spin_unlock(&iboe->lock);
+	return 0;
+}
+
+static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+			      struct ib_port_attr *props)
+{
+	struct ib_smp *in_mad  = NULL;
+	struct ib_smp *out_mad = NULL;
+	int err = -ENOMEM;
+
+	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+
+	memset(props, 0, sizeof *props);
+
+	init_query_mad(in_mad);
+	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+	in_mad->attr_mod = cpu_to_be32(port);
+
+	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+	if (err)
+		goto out;
+
+	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
+		ib_link_query_port(ibdev, port, props, out_mad) :
+		eth_link_query_port(ibdev, port, props, out_mad);
+
 out:
 	kfree(in_mad);
 	kfree(out_mad);
@@ -203,8 +281,8 @@ out:
 	return err;
 }
 
-static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
-			     union ib_gid *gid)
+static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+			       union ib_gid *gid)
 {
 	struct ib_smp *in_mad  = NULL;
 	struct ib_smp *out_mad = NULL;
@@ -241,6 +319,25 @@ out:
 	return err;
 }
 
+static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
+			  union ib_gid *gid)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
+
+	*gid = dev->iboe.gid_table[port - 1][index];
+
+	return 0;
+}
+
+static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+			     union ib_gid *gid)
+{
+	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
+		return __mlx4_ib_query_gid(ibdev, port, index, gid);
+	else
+		return iboe_query_gid(ibdev, port, index, gid);
+}
+
 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			      u16 *pkey)
 {
@@ -289,6 +386,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	int err;
+	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
 	if (IS_ERR(mailbox))
@@ -304,7 +402,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
 		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
 	}
 
-	err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
+	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
 		       MLX4_CMD_TIME_CLASS_B);
 
 	mlx4_free_cmd_mailbox(dev->dev, mailbox);
@@ -447,18 +545,132 @@ static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
 	return 0;
 }
 
+static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
+{
+	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_ib_gid_entry *ge;
+
+	ge = kzalloc(sizeof *ge, GFP_KERNEL);
+	if (!ge)
+		return -ENOMEM;
+
+	ge->gid = *gid;
+	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
+		ge->port = mqp->port;
+		ge->added = 1;
+	}
+
+	mutex_lock(&mqp->mutex);
+	list_add_tail(&ge->list, &mqp->gid_list);
+	mutex_unlock(&mqp->mutex);
+
+	return 0;
+}
+
+int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+		   union ib_gid *gid)
+{
+	u8 mac[6];
+	struct net_device *ndev;
+	int ret = 0;
+
+	if (!mqp->port)
+		return 0;
+
+	spin_lock(&mdev->iboe.lock);
+	ndev = mdev->iboe.netdevs[mqp->port - 1];
+	if (ndev)
+		dev_hold(ndev);
+	spin_unlock(&mdev->iboe.lock);
+
+	if (ndev) {
+		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
+		rtnl_lock();
+		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
+		ret = 1;
+		rtnl_unlock();
+		dev_put(ndev);
+	}
+
+	return ret;
+}
+
 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-	return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
-				     &to_mqp(ibqp)->mqp, gid->raw,
-				     !!(to_mqp(ibqp)->flags &
-					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+	int err;
+	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+
+	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
+				    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+	if (err)
+		return err;
+
+	err = add_gid_entry(ibqp, gid);
+	if (err)
+		goto err_add;
+
+	return 0;
+
+err_add:
+	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
+	return err;
+}
+
+static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
+{
+	struct mlx4_ib_gid_entry *ge;
+	struct mlx4_ib_gid_entry *tmp;
+	struct mlx4_ib_gid_entry *ret = NULL;
+
+	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
+		if (!memcmp(raw, ge->gid.raw, 16)) {
+			ret = ge;
+			break;
+		}
+	}
+
+	return ret;
 }
 
 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-	return mlx4_multicast_detach(to_mdev(ibqp->device)->dev,
-				     &to_mqp(ibqp)->mqp, gid->raw);
+	int err;
+	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+	u8 mac[6];
+	struct net_device *ndev;
+	struct mlx4_ib_gid_entry *ge;
+
+	err = mlx4_multicast_detach(mdev->dev,
+				    &mqp->mqp, gid->raw);
+	if (err)
+		return err;
+
+	mutex_lock(&mqp->mutex);
+	ge = find_gid_entry(mqp, gid->raw);
+	if (ge) {
+		spin_lock(&mdev->iboe.lock);
+		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
+		if (ndev)
+			dev_hold(ndev);
+		spin_unlock(&mdev->iboe.lock);
+		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
+		if (ndev) {
+			rtnl_lock();
+			dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
+			rtnl_unlock();
+			dev_put(ndev);
+		}
+		list_del(&ge->list);
+		kfree(ge);
+	} else
+		printk(KERN_WARNING "could not find mgid entry\n");
+
+	mutex_unlock(&mqp->mutex);
+
+	return 0;
 }
 
 static int init_node_data(struct mlx4_ib_dev *dev)
@@ -543,15 +755,143 @@ static struct device_attribute *mlx4_class_attributes[] = {
 	&dev_attr_board_id
 };
 
+static void mlx4_addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
+{
+	memcpy(eui, dev->dev_addr, 3);
+	memcpy(eui + 5, dev->dev_addr + 3, 3);
+	eui[3] = 0xFF;
+	eui[4] = 0xFE;
+	eui[0] ^= 2;
+}
+
+static void update_gids_task(struct work_struct *work)
+{
+	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
+	struct mlx4_cmd_mailbox *mailbox;
+	union ib_gid *gids;
+	int err;
+	struct mlx4_dev	*dev = gw->dev->dev;
+	struct ib_event event;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox)) {
+		printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
+		return;
+	}
+
+	gids = mailbox->buf;
+	memcpy(gids, gw->gids, sizeof gw->gids);
+
+	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+	if (err)
+		printk(KERN_WARNING "set port command failed\n");
+	else {
+		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
+		event.device = &gw->dev->ib_dev;
+		event.element.port_num = gw->port;
+		event.event = IB_EVENT_LID_CHANGE;
+		ib_dispatch_event(&event);
+	}
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	kfree(gw);
+}
+
+static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
+{
+	struct net_device *ndev = dev->iboe.netdevs[port - 1];
+	struct update_gid_work *work;
+
+	work = kzalloc(sizeof *work, GFP_ATOMIC);
+	if (!work)
+		return -ENOMEM;
+
+	if (!clear) {
+		mlx4_addrconf_ifid_eui48(&work->gids[0].raw[8], ndev);
+		work->gids[0].global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+	}
+
+	INIT_WORK(&work->work, update_gids_task);
+	work->port = port;
+	work->dev = dev;
+	queue_work(wq, &work->work);
+
+	return 0;
+}
+
+static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
+{
+	switch (event) {
+	case NETDEV_UP:
+		update_ipv6_gids(dev, port, 0);
+		break;
+
+	case NETDEV_DOWN:
+		update_ipv6_gids(dev, port, 1);
+		dev->iboe.netdevs[port - 1] = NULL;
+	}
+}
+
+static void netdev_added(struct mlx4_ib_dev *dev, int port)
+{
+	update_ipv6_gids(dev, port, 0);
+}
+
+static void netdev_removed(struct mlx4_ib_dev *dev, int port)
+{
+	update_ipv6_gids(dev, port, 1);
+}
+
+static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
+				void *ptr)
+{
+	struct net_device *dev = ptr;
+	struct mlx4_ib_dev *ibdev;
+	struct net_device *oldnd;
+	struct mlx4_ib_iboe *iboe;
+	int port;
+
+	if (!net_eq(dev_net(dev), &init_net))
+		return NOTIFY_DONE;
+
+	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
+	iboe = &ibdev->iboe;
+
+	spin_lock(&iboe->lock);
+	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+		oldnd = iboe->netdevs[port - 1];
+		iboe->netdevs[port - 1] =
+			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port);
+		if (oldnd != iboe->netdevs[port - 1]) {
+			if (iboe->netdevs[port - 1])
+				netdev_added(ibdev, port);
+			else
+				netdev_removed(ibdev, port);
+		}
+	}
+
+	if (dev == iboe->netdevs[0])
+		handle_en_event(ibdev, 1, event);
+	else if (dev == iboe->netdevs[1])
+		handle_en_event(ibdev, 2, event);
+
+	spin_unlock(&iboe->lock);
+
+	return NOTIFY_DONE;
+}
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
 	struct mlx4_ib_dev *ibdev;
 	int num_ports = 0;
 	int i;
+	int err;
+	struct mlx4_ib_iboe *iboe;
 
 	printk_once(KERN_INFO "%s", mlx4_ib_version);
 
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+	mlx4_foreach_ib_transport_port(i, dev)
 		num_ports++;
 
 	/* No point in registering a device with no ports... */
@@ -564,6 +904,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		return NULL;
 	}
 
+	iboe = &ibdev->iboe;
+
 	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
 		goto err_dealloc;
 
@@ -612,6 +954,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
 	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
 	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
+	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
 	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
 	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
 	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
@@ -656,6 +999,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
 	ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
 
+	spin_lock_init(&iboe->lock);
+
 	if (init_node_data(ibdev))
 		goto err_map;
 
@@ -668,16 +1013,28 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	if (mlx4_ib_mad_init(ibdev))
 		goto err_reg;
 
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
+		iboe->nb.notifier_call = mlx4_ib_netdev_event;
+		err = register_netdevice_notifier(&iboe->nb);
+		if (err)
+			goto err_reg;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
 		if (device_create_file(&ibdev->ib_dev.dev,
 				       mlx4_class_attributes[i]))
-			goto err_reg;
+			goto err_notif;
 	}
 
 	ibdev->ib_active = true;
 
 	return ibdev;
 
+err_notif:
+	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+		printk(KERN_WARNING "failure unregistering notifier\n");
+	flush_workqueue(wq);
+
 err_reg:
 	ib_unregister_device(&ibdev->ib_dev);
 
@@ -703,11 +1060,16 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 
 	mlx4_ib_mad_cleanup(ibdev);
 	ib_unregister_device(&ibdev->ib_dev);
+	if (ibdev->iboe.nb.notifier_call) {
+		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+			printk(KERN_WARNING "failure unregistering notifier\n");
+		ibdev->iboe.nb.notifier_call = NULL;
+	}
+	iounmap(ibdev->uar_map);
 
-	for (p = 1; p <= ibdev->num_ports; ++p)
+	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
 		mlx4_CLOSE_PORT(dev, p);
 
-	iounmap(ibdev->uar_map);
 	mlx4_uar_free(dev, &ibdev->priv_uar);
 	mlx4_pd_free(dev, ibdev->priv_pdn);
 	ib_dealloc_device(&ibdev->ib_dev);
@@ -747,19 +1109,33 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 }
 
 static struct mlx4_interface mlx4_ib_interface = {
-	.add	= mlx4_ib_add,
-	.remove	= mlx4_ib_remove,
-	.event	= mlx4_ib_event
+	.add		= mlx4_ib_add,
+	.remove		= mlx4_ib_remove,
+	.event		= mlx4_ib_event,
+	.protocol	= MLX4_PROTOCOL_IB
 };
 
 static int __init mlx4_ib_init(void)
 {
-	return mlx4_register_interface(&mlx4_ib_interface);
+	int err;
+
+	wq = create_singlethread_workqueue("mlx4_ib");
+	if (!wq)
+		return -ENOMEM;
+
+	err = mlx4_register_interface(&mlx4_ib_interface);
+	if (err) {
+		destroy_workqueue(wq);
+		return err;
+	}
+
+	return 0;
 }
 
 static void __exit mlx4_ib_cleanup(void)
 {
 	mlx4_unregister_interface(&mlx4_ib_interface);
+	destroy_workqueue(wq);
 }
 
 module_init(mlx4_ib_init);