@@ -47,6 +47,7 @@
 
 #include <rdma/rdma_cm.h>
 #include <rdma/rdma_cm_ib.h>
+#include <rdma/rdma_netlink.h>
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
 #include <rdma/ib_sa.h>
@@ -89,20 +90,6 @@ struct cma_device {
 	struct list_head	id_list;
 };
 
-enum cma_state {
-	CMA_IDLE,
-	CMA_ADDR_QUERY,
-	CMA_ADDR_RESOLVED,
-	CMA_ROUTE_QUERY,
-	CMA_ROUTE_RESOLVED,
-	CMA_CONNECT,
-	CMA_DISCONNECT,
-	CMA_ADDR_BOUND,
-	CMA_LISTEN,
-	CMA_DEVICE_REMOVAL,
-	CMA_DESTROYING
-};
-
 struct rdma_bind_list {
 	struct idr		*ps;
 	struct hlist_head	owners;
@@ -126,7 +113,7 @@ struct rdma_id_private {
 	struct list_head	mc_list;
 
 	int			internal_id;
-	enum cma_state		state;
+	enum rdma_cm_state	state;
 	spinlock_t		lock;
 	struct mutex		qp_mutex;
@@ -146,6 +133,7 @@ struct rdma_id_private {
 	u32			seq_num;
 	u32			qkey;
 	u32			qp_num;
+	pid_t			owner;
 	u8			srq;
 	u8			tos;
 	u8			reuseaddr;
@@ -165,8 +153,8 @@ struct cma_multicast {
 struct cma_work {
 	struct work_struct	work;
 	struct rdma_id_private	*id;
-	enum cma_state		old_state;
-	enum cma_state		new_state;
+	enum rdma_cm_state	old_state;
+	enum rdma_cm_state	new_state;
 	struct rdma_cm_event	event;
 };
 
@@ -217,7 +205,7 @@ struct sdp_hah {
 #define CMA_VERSION 0x00
 #define SDP_MAJ_VERSION 0x2
 
-static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
+static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
 {
 	unsigned long flags;
 	int ret;
@@ -229,7 +217,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
 }
 
 static int cma_comp_exch(struct rdma_id_private *id_priv,
-			 enum cma_state comp, enum cma_state exch)
+			 enum rdma_cm_state comp, enum rdma_cm_state exch)
 {
 	unsigned long flags;
 	int ret;
@@ -241,11 +229,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
 	return ret;
 }
 
-static enum cma_state cma_exch(struct rdma_id_private *id_priv,
-			       enum cma_state exch)
+static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
+				   enum rdma_cm_state exch)
 {
 	unsigned long flags;
-	enum cma_state old;
+	enum rdma_cm_state old;
 
 	spin_lock_irqsave(&id_priv->lock, flags);
 	old = id_priv->state;
@@ -279,11 +267,6 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
 	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
 }
 
-static inline int cma_is_ud_ps(enum rdma_port_space ps)
-{
-	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
-}
-
 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 			      struct cma_device *cma_dev)
 {
@@ -413,7 +396,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 }
 
 static int cma_disable_callback(struct rdma_id_private *id_priv,
-				enum cma_state state)
+				enum rdma_cm_state state)
 {
 	mutex_lock(&id_priv->handler_mutex);
 	if (id_priv->state != state) {
@@ -429,7 +412,8 @@ static int cma_has_cm_dev(struct rdma_id_private *id_priv)
 }
 
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
-				  void *context, enum rdma_port_space ps)
+				  void *context, enum rdma_port_space ps,
+				  enum ib_qp_type qp_type)
 {
 	struct rdma_id_private *id_priv;
 
@@ -437,10 +421,12 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 	if (!id_priv)
 		return ERR_PTR(-ENOMEM);
 
-	id_priv->state = CMA_IDLE;
+	id_priv->owner = task_pid_nr(current);
+	id_priv->state = RDMA_CM_IDLE;
 	id_priv->id.context = context;
 	id_priv->id.event_handler = event_handler;
 	id_priv->id.ps = ps;
+	id_priv->id.qp_type = qp_type;
 	spin_lock_init(&id_priv->lock);
 	mutex_init(&id_priv->qp_mutex);
 	init_completion(&id_priv->comp);
@@ -508,7 +494,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 	if (IS_ERR(qp))
 		return PTR_ERR(qp);
 
-	if (cma_is_ud_ps(id_priv->id.ps))
+	if (id->qp_type == IB_QPT_UD)
 		ret = cma_init_ud_qp(id_priv, qp);
 	else
 		ret = cma_init_conn_qp(id_priv, qp);
@@ -636,7 +622,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 	qp_attr->port_num = id_priv->id.port_num;
 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
 
-	if (cma_is_ud_ps(id_priv->id.ps)) {
+	if (id_priv->id.qp_type == IB_QPT_UD) {
 		ret = cma_set_qkey(id_priv);
 		if (ret)
 			return ret;
@@ -659,7 +645,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 	id_priv = container_of(id, struct rdma_id_private, id);
 	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
+		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
 			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
 		else
 			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
@@ -858,16 +844,16 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
 }
 
 static void cma_cancel_operation(struct rdma_id_private *id_priv,
-				 enum cma_state state)
+				 enum rdma_cm_state state)
 {
 	switch (state) {
-	case CMA_ADDR_QUERY:
+	case RDMA_CM_ADDR_QUERY:
 		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
 		break;
-	case CMA_ROUTE_QUERY:
+	case RDMA_CM_ROUTE_QUERY:
 		cma_cancel_route(id_priv);
 		break;
-	case CMA_LISTEN:
+	case RDMA_CM_LISTEN:
 		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
 				&& !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
@@ -918,10 +904,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 void rdma_destroy_id(struct rdma_cm_id *id)
 {
 	struct rdma_id_private *id_priv;
-	enum cma_state state;
+	enum rdma_cm_state state;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	state = cma_exch(id_priv, CMA_DESTROYING);
+	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
 	/*
@@ -1015,9 +1001,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int ret = 0;
 
 	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_CONNECT)) ||
+		cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
 	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_DISCONNECT)))
+		cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1048,7 +1034,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		event.status = -ETIMEDOUT; /* fall through */
 	case IB_CM_DREQ_RECEIVED:
 	case IB_CM_DREP_RECEIVED:
-		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
+		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
+				   RDMA_CM_DISCONNECT))
 			goto out;
 		event.event = RDMA_CM_EVENT_DISCONNECTED;
 		break;
@@ -1075,7 +1062,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1101,7 +1088,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		goto err;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
-			    listen_id->ps);
+			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
 	if (IS_ERR(id))
 		goto err;
 
@@ -1132,7 +1119,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 
 destroy_id:
@@ -1152,7 +1139,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	int ret;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
-			    listen_id->ps);
+			    listen_id->ps, IB_QPT_UD);
 	if (IS_ERR(id))
 		return NULL;
 
@@ -1172,7 +1159,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	}
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 err:
 	rdma_destroy_id(id);
@@ -1201,13 +1188,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-	if (cma_is_ud_ps(listen_id->id.ps)) {
+	if (listen_id->id.qp_type == IB_QPT_UD) {
 		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
 		event.param.ud.private_data = ib_event->private_data + offset;
 		event.param.ud.private_data_len =
@@ -1243,8 +1230,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	 * while we're accessing the cm_id.
 	 */
 	mutex_lock(&lock);
-	if (cma_comp(conn_id, CMA_CONNECT) &&
-	    !cma_is_ud_ps(conn_id->id.ps))
+	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 	mutex_unlock(&lock);
 	mutex_unlock(&conn_id->handler_mutex);
@@ -1257,7 +1243,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	conn_id->cm_id.ib = NULL;
 
 release_conn_id:
-	cma_exch(conn_id, CMA_DESTROYING);
+	cma_exch(conn_id, RDMA_CM_DESTROYING);
 	mutex_unlock(&conn_id->handler_mutex);
 	rdma_destroy_id(&conn_id->id);
 
@@ -1328,7 +1314,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	struct sockaddr_in *sin;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1371,7 +1357,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1393,20 +1379,20 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	struct ib_device_attr attr;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	/* Create a new RDMA id for the new IW CM ID */
 	new_cm_id = rdma_create_id(listen_id->id.event_handler,
 				   listen_id->id.context,
-				   RDMA_PS_TCP);
+				   RDMA_PS_TCP, IB_QPT_RC);
 	if (IS_ERR(new_cm_id)) {
 		ret = -ENOMEM;
 		goto out;
 	}
 	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-	conn_id->state = CMA_CONNECT;
+	conn_id->state = RDMA_CM_CONNECT;
 
 	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
 	if (!dev) {
@@ -1461,7 +1447,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	if (ret) {
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
+		cma_exch(conn_id, RDMA_CM_DESTROYING);
 		mutex_unlock(&conn_id->handler_mutex);
 		cma_deref_id(conn_id);
 		rdma_destroy_id(&conn_id->id);
@@ -1548,13 +1534,14 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	struct rdma_cm_id *id;
 	int ret;
 
-	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
+	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
+			    id_priv->id.qp_type);
 	if (IS_ERR(id))
 		return;
 
 	dev_id_priv = container_of(id, struct rdma_id_private, id);
 
-	dev_id_priv->state = CMA_ADDR_BOUND;
+	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
 	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 
@@ -1601,8 +1588,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 		route->num_paths = 1;
 		*route->path_rec = *path_rec;
 	} else {
-		work->old_state = CMA_ROUTE_QUERY;
-		work->new_state = CMA_ADDR_RESOLVED;
+		work->old_state = RDMA_CM_ROUTE_QUERY;
+		work->new_state = RDMA_CM_ADDR_RESOLVED;
 		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
 		work->event.status = status;
 	}
@@ -1660,7 +1647,7 @@ static void cma_work_handler(struct work_struct *_work)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
 out:
@@ -1678,12 +1665,12 @@ static void cma_ndev_work_handler(struct work_struct *_work)
 	int destroy = 0;
 
 	mutex_lock(&id_priv->handler_mutex);
-	if (id_priv->state == CMA_DESTROYING ||
-	    id_priv->state == CMA_DEVICE_REMOVAL)
+	if (id_priv->state == RDMA_CM_DESTROYING ||
+	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
 
@@ -1707,8 +1694,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 
 	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
@@ -1737,7 +1724,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+			   RDMA_CM_ROUTE_RESOLVED))
 		return -EINVAL;
 
 	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
@@ -1750,7 +1738,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	id->route.num_paths = num_paths;
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_set_ib_paths);
@@ -1765,8 +1753,8 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -1830,8 +1818,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err2;
 	}
 
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	work->event.status = 0;
 
@@ -1853,7 +1841,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -1882,7 +1870,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
 	cma_deref_id(id_priv);
 	return ret;
 }
@@ -1941,14 +1929,16 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	memset(&event, 0, sizeof event);
 	mutex_lock(&id_priv->handler_mutex);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
+			   RDMA_CM_ADDR_RESOLVED))
 		goto out;
 
 	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv);
 
 	if (status) {
-		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
+		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+				   RDMA_CM_ADDR_BOUND))
 			goto out;
 		event.event = RDMA_CM_EVENT_ADDR_ERROR;
 		event.status = status;
@@ -1959,7 +1949,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 	}
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		cma_deref_id(id_priv);
 		rdma_destroy_id(&id_priv->id);
@@ -2004,8 +1994,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ADDR_QUERY;
-	work->new_state = CMA_ADDR_RESOLVED;
+	work->old_state = RDMA_CM_ADDR_QUERY;
+	work->new_state = RDMA_CM_ADDR_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -2034,13 +2024,13 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		ret = cma_bind_addr(id, src_addr, dst_addr);
 		if (ret)
 			return ret;
 	}
 
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -2056,7 +2046,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
 	cma_deref_id(id_priv);
 	return ret;
 }
@@ -2070,7 +2060,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 
 	id_priv = container_of(id, struct rdma_id_private, id);
 	spin_lock_irqsave(&id_priv->lock, flags);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		id_priv->reuseaddr = reuse;
 		ret = 0;
 	} else {
@@ -2177,7 +2167,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 		if (id_priv == cur_id)
 			continue;
 
-		if ((cur_id->state == CMA_LISTEN) ||
+		if ((cur_id->state == RDMA_CM_LISTEN) ||
 		    !reuseaddr || !cur_id->reuseaddr) {
 			cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
 			if (cma_any_addr(cur_addr))
@@ -2280,14 +2270,14 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
 		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
 		if (ret)
 			return ret;
 	}
 
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
 		return -EINVAL;
 
 	if (id_priv->reuseaddr) {
@@ -2319,7 +2309,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	return 0;
 err:
 	id_priv->backlog = 0;
-	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
+	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_listen);
@@ -2333,7 +2323,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		return -EAFNOSUPPORT;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
+	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
 		return -EINVAL;
 
 	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
@@ -2360,7 +2350,7 @@ err2:
 	if (id_priv->cma_dev)
 		cma_release_dev(id_priv);
 err1:
-	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_bind_addr);
@@ -2433,7 +2423,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -2479,7 +2469,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -2645,7 +2635,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp) {
@@ -2655,7 +2645,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_resolve_ib_udp(id_priv, conn_param);
 		else
 			ret = cma_connect_ib(id_priv, conn_param);
@@ -2672,7 +2662,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_connect);
@@ -2758,7 +2748,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+
+	id_priv->owner = task_pid_nr(current);
+
+	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp && conn_param) {
@@ -2768,7 +2761,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
 						conn_param->private_data,
 						conn_param->private_data_len);
@@ -2829,7 +2822,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
 						private_data, private_data_len);
 		else
@@ -2887,8 +2880,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	int ret;
 
 	id_priv = mc->id_priv;
-	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
-	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
+	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return 0;
 
 	mutex_lock(&id_priv->qp_mutex);
@@ -2912,7 +2905,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return 0;
@@ -3095,8 +3088,8 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
-	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
+	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return -EINVAL;
 
 	mc = kmalloc(sizeof *mc, GFP_KERNEL);
@@ -3261,19 +3254,19 @@ static void cma_add_one(struct ib_device *device)
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
 	struct rdma_cm_event event;
-	enum cma_state state;
+	enum rdma_cm_state state;
 	int ret = 0;
 
 	/* Record that we want to remove the device */
-	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
-	if (state == CMA_DESTROYING)
+	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
+	if (state == RDMA_CM_DESTROYING)
		return 0;
 
 	cma_cancel_operation(id_priv, state);
 	mutex_lock(&id_priv->handler_mutex);
 
 	/* Check for destruction from another callback. */
-	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
+	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
 		goto out;
 
 	memset(&event, 0, sizeof event);
@@ -3328,6 +3321,100 @@ static void cma_remove_one(struct ib_device *device)
 	kfree(cma_dev);
 }
 
+static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct nlmsghdr *nlh;
+	struct rdma_cm_id_stats *id_stats;
+	struct rdma_id_private *id_priv;
+	struct rdma_cm_id *id = NULL;
+	struct cma_device *cma_dev;
+	int i_dev = 0, i_id = 0;
+
+	/*
+	 * We export all of the IDs as a sequence of messages.  Each
+	 * ID gets its own netlink message.
+	 */
+	mutex_lock(&lock);
+
+	list_for_each_entry(cma_dev, &dev_list, list) {
+		if (i_dev < cb->args[0]) {
+			i_dev++;
+			continue;
+		}
+
+		i_id = 0;
+		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+			if (i_id < cb->args[1]) {
+				i_id++;
+				continue;
+			}
+
+			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
+						sizeof *id_stats, RDMA_NL_RDMA_CM,
+						RDMA_NL_RDMA_CM_ID_STATS);
+			if (!id_stats)
+				goto out;
+
+			memset(id_stats, 0, sizeof *id_stats);
+			id = &id_priv->id;
+			id_stats->node_type = id->route.addr.dev_addr.dev_type;
+			id_stats->port_num = id->port_num;
+			id_stats->bound_dev_if =
+				id->route.addr.dev_addr.bound_dev_if;
+
+			if (id->route.addr.src_addr.ss_family == AF_INET) {
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in),
+						  &id->route.addr.src_addr,
+						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+					goto out;
+				}
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in),
+						  &id->route.addr.dst_addr,
+						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+					goto out;
+				}
+			} else if (id->route.addr.src_addr.ss_family == AF_INET6) {
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in6),
+						  &id->route.addr.src_addr,
+						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+					goto out;
+				}
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in6),
+						  &id->route.addr.dst_addr,
+						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+					goto out;
+				}
+			}
+
+			id_stats->pid = id_priv->owner;
+			id_stats->port_space = id->ps;
+			id_stats->cm_state = id_priv->state;
+			id_stats->qp_num = id_priv->qp_num;
+			id_stats->qp_type = id->qp_type;
+
+			i_id++;
+		}
+
+		cb->args[1] = 0;
+		i_dev++;
+	}
+
+out:
+	mutex_unlock(&lock);
+	cb->args[0] = i_dev;
+	cb->args[1] = i_id;
+
+	return skb->len;
+}
+
+static const struct ibnl_client_cbs cma_cb_table[] = {
+	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
+};
+
 static int __init cma_init(void)
 {
 	int ret;
@@ -3343,6 +3430,10 @@ static int __init cma_init(void)
 	ret = ib_register_client(&cma_client);
 	if (ret)
 		goto err;
+
+	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
+		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+
 	return 0;
 
 err:
@@ -3355,6 +3446,7 @@ err:
 
 static void __exit cma_cleanup(void)
 {
+	ibnl_remove_client(RDMA_NL_RDMA_CM);
 	ib_unregister_client(&cma_client);
 	unregister_netdevice_notifier(&cma_nb);
 	rdma_addr_unregister_client(&addr_client);
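
For reference, a minimal userspace consumer of the stats export added above might look like the sketch below. It is not part of the patch. The numeric values (NETLINK_RDMA == 20, RDMA_NL_RDMA_CM == 1, RDMA_NL_RDMA_CM_ID_STATS == 0) and the RDMA_NL_GET_TYPE() encoding of (client << 10) + op are assumptions taken from the companion netlink infrastructure patches; verify them against <linux/netlink.h> and <rdma/rdma_netlink.h> in your tree before relying on them.

/*
 * Hypothetical consumer sketch, not part of this patch: dump the RDMA CM
 * ID stats exported by cma_get_id_stats().  Each reply message carries a
 * struct rdma_cm_id_stats payload at NLMSG_DATA(), optionally followed by
 * the source/destination address attributes.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#define NETLINK_RDMA			20	/* assumed, see <linux/netlink.h> */
#define RDMA_NL_RDMA_CM			1	/* assumed, see <rdma/rdma_netlink.h> */
#define RDMA_NL_RDMA_CM_ID_STATS	0
#define RDMA_NL_GET_TYPE(client, op)	(((client) << 10) + (op))

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct nlmsghdr req = {
		.nlmsg_len   = NLMSG_LENGTH(0),
		.nlmsg_type  = RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM,
						RDMA_NL_RDMA_CM_ID_STATS),
		.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		.nlmsg_seq   = 1,
	};
	char buf[8192];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
	if (fd < 0)
		return 1;
	if (sendto(fd, &req, req.nlmsg_len, 0,
		   (struct sockaddr *) &kernel, sizeof kernel) < 0)
		return 1;

	/* cma_get_id_stats() emits one netlink message per rdma_cm_id. */
	while ((len = recv(fd, buf, sizeof buf, 0)) > 0) {
		struct nlmsghdr *nlh = (struct nlmsghdr *) buf;

		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE)
				return 0;
			printf("cm_id stats message, %u bytes\n",
			       nlh->nlmsg_len);
		}
	}
	return 1;
}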