@@ -128,7 +128,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	void *vpage;
 	u32 counter;
 	u64 rpage, cqx_fec, h_ret;
-	int ipz_rc, ret, i;
+	int ipz_rc, i;
 	unsigned long flags;
 
 	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
@@ -163,32 +163,19 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	adapter_handle = shca->ipz_hca_handle;
 	param.eq_handle = shca->eq.ipz_eq_handle;
 
-	do {
-		if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
-			cq = ERR_PTR(-ENOMEM);
-			ehca_err(device, "Can't reserve idr nr. device=%p",
-				 device);
-			goto create_cq_exit1;
-		}
-
-		write_lock_irqsave(&ehca_cq_idr_lock, flags);
-		ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
-		write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-	} while (ret == -EAGAIN);
+	idr_preload(GFP_KERNEL);
+	write_lock_irqsave(&ehca_cq_idr_lock, flags);
+	my_cq->token = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
+	write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+	idr_preload_end();
 
-	if (ret) {
+	if (my_cq->token < 0) {
 		cq = ERR_PTR(-ENOMEM);
 		ehca_err(device, "Can't allocate new idr entry. device=%p",
 			 device);
 		goto create_cq_exit1;
 	}
 
-	if (my_cq->token > 0x1FFFFFF) {
-		cq = ERR_PTR(-ENOMEM);
-		ehca_err(device, "Invalid number of cq. device=%p", device);
-		goto create_cq_exit2;
-	}
-
 	/*
 	 * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
 	 * for receiving errors CQEs.
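
Reviewer's note (not part of the patch): idr_preload() + idr_alloc(..., GFP_NOWAIT) replaces the old idr_pre_get()/idr_get_new() retry loop. The sleeping preallocation now happens once, outside the lock, so the allocation under ehca_cq_idr_lock no longer needs the -EAGAIN retry. idr_alloc() also takes the id range directly, and its end argument is exclusive, so [0, 0x2000000) yields tokens 0..0x1FFFFFF and subsumes the removed "token > 0x1FFFFFF" check. A minimal sketch of the pattern; my_idr, my_lock and alloc_token() are illustrative, not taken from the driver:

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(my_idr);		/* hypothetical idr/lock pair */
	static DEFINE_SPINLOCK(my_lock);

	static int alloc_token(void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep; fills the per-cpu cache */
		spin_lock(&my_lock);
		/* end is exclusive: allocates an id in [0, 0x1FFFFFF] */
		id = idr_alloc(&my_idr, ptr, 0, 0x2000000, GFP_NOWAIT);
		spin_unlock(&my_lock);
		idr_preload_end();

		return id;	/* >= 0 on success, -ENOMEM or -ENOSPC on failure */
	}

One caveat worth flagging: the patch stores the idr_alloc() return value straight into my_cq->token, which (if it is still the u32 declared in ehca_classes.h) makes the "my_cq->token < 0" test an always-false unsigned comparison. Assigning to a signed local first, e.g. the ret variable the first hunk removes, and copying it into token only on success would keep the error check effective.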