@@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp(
 					      ib_device);
 	struct ib_ucontext *context = NULL;
 	u64 h_ret;
-	int is_llqp = 0, has_srq = 0;
+	int is_llqp = 0, has_srq = 0, is_user = 0;
 	int qp_type, max_send_sge, max_recv_sge, ret;
 
 	/* h_call's out parameters */
@@ -609,9 +609,6 @@ static struct ehca_qp *internal_create_qp(
 		}
 	}
 
-	if (pd->uobject && udata)
-		context = pd->uobject->context;
-
 	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
 	if (!my_qp) {
 		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
@@ -619,6 +616,11 @@ static struct ehca_qp *internal_create_qp(
 		return ERR_PTR(-ENOMEM);
 	}
 
+	if (pd->uobject && udata) {
+		is_user = 1;
+		context = pd->uobject->context;
+	}
+
 	atomic_set(&my_qp->nr_events, 0);
 	init_waitqueue_head(&my_qp->wait_completion);
 	spin_lock_init(&my_qp->spinlock_s);
@@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
 			(parms.squeue.is_small || parms.rqueue.is_small);
 	}
 
-	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
+	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
 			 h_ret);
@@ -769,18 +771,20 @@ static struct ehca_qp *internal_create_qp(
 			goto create_qp_exit2;
 		}
 
-		my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
-			my_qp->ipz_squeue.qe_size;
-		my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
-					sizeof(struct ehca_qmap_entry));
-		if (!my_qp->sq_map.map) {
-			ehca_err(pd->device, "Couldn't allocate squeue "
-				 "map ret=%i", ret);
-			goto create_qp_exit3;
+		if (!is_user) {
+			my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
+				my_qp->ipz_squeue.qe_size;
+			my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
+						    sizeof(struct ehca_qmap_entry));
+			if (!my_qp->sq_map.map) {
+				ehca_err(pd->device, "Couldn't allocate squeue "
+					 "map ret=%i", ret);
+				goto create_qp_exit3;
+			}
+			INIT_LIST_HEAD(&my_qp->sq_err_node);
+			/* to avoid the generation of bogus flush CQEs */
+			reset_queue_map(&my_qp->sq_map);
 		}
-		INIT_LIST_HEAD(&my_qp->sq_err_node);
-		/* to avoid the generation of bogus flush CQEs */
-		reset_queue_map(&my_qp->sq_map);
 	}
 
 	if (HAS_RQ(my_qp)) {
@@ -792,20 +796,21 @@ static struct ehca_qp *internal_create_qp(
 				 "and pages ret=%i", ret);
 			goto create_qp_exit4;
 		}
-
-		my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
-			my_qp->ipz_rqueue.qe_size;
-		my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
-				sizeof(struct ehca_qmap_entry));
-		if (!my_qp->rq_map.map) {
-			ehca_err(pd->device, "Couldn't allocate squeue "
-					"map ret=%i", ret);
-			goto create_qp_exit5;
+		if (!is_user) {
+			my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
+				my_qp->ipz_rqueue.qe_size;
+			my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
+						    sizeof(struct ehca_qmap_entry));
+			if (!my_qp->rq_map.map) {
+				ehca_err(pd->device, "Couldn't allocate squeue "
+					 "map ret=%i", ret);
+				goto create_qp_exit5;
+			}
+			INIT_LIST_HEAD(&my_qp->rq_err_node);
+			/* to avoid the generation of bogus flush CQEs */
+			reset_queue_map(&my_qp->rq_map);
 		}
-		INIT_LIST_HEAD(&my_qp->rq_err_node);
-		/* to avoid the generation of bogus flush CQEs */
-		reset_queue_map(&my_qp->rq_map);
-	} else if (init_attr->srq) {
+	} else if (init_attr->srq && !is_user) {
 		/* this is a base QP, use the queue map of the SRQ */
 		my_qp->rq_map = my_srq->rq_map;
 		INIT_LIST_HEAD(&my_qp->rq_err_node);
@@ -918,7 +923,7 @@ create_qp_exit7:
 	kfree(my_qp->mod_qp_parm);
 
 create_qp_exit6:
-	if (HAS_RQ(my_qp))
+	if (HAS_RQ(my_qp) && !is_user)
 		vfree(my_qp->rq_map.map);
 
 create_qp_exit5:
@@ -926,7 +931,7 @@ create_qp_exit5:
 	ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 
 create_qp_exit4:
-	if (HAS_SQ(my_qp))
+	if (HAS_SQ(my_qp) && !is_user)
 		vfree(my_qp->sq_map.map);
 
 create_qp_exit3:
@@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	u64 update_mask;
 	u64 h_ret;
 	int bad_wqe_cnt = 0;
+	int is_user = 0;
 	int squeue_locked = 0;
 	unsigned long flags = 0;
 
@@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		ret = ehca2ib_return_code(h_ret);
 		goto modify_qp_exit1;
 	}
+	if (ibqp->uobject)
+		is_user = 1;
 
 	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
 
@@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 			goto modify_qp_exit2;
 		}
 	}
-	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
+	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
+	    && !is_user) {
 		ret = check_for_left_cqes(my_qp, shca);
 		if (ret)
 			goto modify_qp_exit2;
@@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		ipz_qeit_reset(&my_qp->ipz_rqueue);
 		ipz_qeit_reset(&my_qp->ipz_squeue);
 
-		if (qp_cur_state == IB_QPS_ERR) {
+		if (qp_cur_state == IB_QPS_ERR && !is_user) {
 			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
 			if (HAS_RQ(my_qp))
 				del_from_err_list(my_qp->recv_cq,
 						  &my_qp->rq_err_node);
 		}
-		reset_queue_map(&my_qp->sq_map);
+		if (!is_user)
+			reset_queue_map(&my_qp->sq_map);
 
-		if (HAS_RQ(my_qp))
+		if (HAS_RQ(my_qp) && !is_user)
 			reset_queue_map(&my_qp->rq_map);
 	}
 
@@ -2138,10 +2148,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	int ret;
 	u64 h_ret;
 	u8 port_num;
+	int is_user = 0;
 	enum ib_qp_type qp_type;
 	unsigned long flags;
 
 	if (uobject) {
+		is_user = 1;
 		if (my_qp->mm_count_galpa ||
 		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
 			ehca_err(dev, "Resources still referenced in "
@@ -2168,10 +2180,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	 * SRQs will never get into an error list and do not have a recv_cq,
 	 * so we need to skip them here.
 	 */
-	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
+	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
 
-	if (HAS_SQ(my_qp))
+	if (HAS_SQ(my_qp) && !is_user)
 		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
 	/* now wait until all pending events have completed */
@@ -2209,13 +2221,13 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 
 	if (HAS_RQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-
-		vfree(my_qp->rq_map.map);
+		if (!is_user)
+			vfree(my_qp->rq_map.map);
 	}
 	if (HAS_SQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-
-		vfree(my_qp->sq_map.map);
+		if (!is_user)
+			vfree(my_qp->sq_map.map);
 	}
 	kmem_cache_free(qp_cache, my_qp);
 	atomic_dec(&shca->num_qps);
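
Every hunk above applies the same pattern: the kernel-side software queue maps (sq_map/rq_map), their error-list nodes, and the flush-CQE bookkeeping built on them are only meaningful when the kernel itself polls the queues, so each allocation, reset, unlink, and free of those structures is now gated on !is_user. Below is a minimal standalone sketch of that gating pattern; the types and helper names are hypothetical (not the driver's real API), and calloc()/free() stand in for vmalloc()/vfree():

/*
 * Hypothetical, simplified illustration of the !is_user guard used
 * throughout the patch above. Not the ehca driver's actual code.
 */
#include <stdlib.h>

struct qmap {
	unsigned int entries;
	void *map;	/* kernel-side shadow of the queue, used for flush CQEs */
};

struct qp_ctx {
	int is_user;	/* nonzero when the QP was created for userspace */
	struct qmap sq_map;
};

int setup_sq_map(struct qp_ctx *qp, unsigned int entries, size_t qe_size)
{
	/* Userspace polls its own queues; the kernel never walks them,
	 * so a shadow map would only waste memory and could later
	 * produce bogus flush CQEs. */
	if (qp->is_user)
		return 0;

	qp->sq_map.entries = entries;
	qp->sq_map.map = calloc(entries, qe_size);
	return qp->sq_map.map ? 0 : -1;
}

void teardown_sq_map(struct qp_ctx *qp)
{
	/* The guard must mirror the allocation on every cleanup path. */
	if (!qp->is_user)
		free(qp->sq_map.map);
}

The key invariant is symmetry: each !is_user guard on an allocation or error-list insertion in internal_create_qp() is matched by the same guard on the corresponding vfree(), del_from_err_list(), or reset_queue_map() in the error labels (create_qp_exit4/create_qp_exit6), in internal_modify_qp(), and in internal_destroy_qp(), which is why all of those paths change in lockstep.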
|