@@ -80,7 +80,8 @@ MODULE_PARM_DESC(mellanox_workarounds,
 
 static void srp_add_one(struct ib_device *device);
 static void srp_remove_one(struct ib_device *device);
-static void srp_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
 
 static struct scsi_transport_template *ib_srp_transport_template;
@@ -227,14 +228,21 @@ static int srp_create_target_ib(struct srp_target_port *target)
 	if (!init_attr)
 		return -ENOMEM;
 
-	target->cq = ib_create_cq(target->srp_host->srp_dev->dev,
-				  srp_completion, NULL, target, SRP_CQ_SIZE, 0);
-	if (IS_ERR(target->cq)) {
-		ret = PTR_ERR(target->cq);
-		goto out;
+	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+				       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
+	if (IS_ERR(target->recv_cq)) {
+		ret = PTR_ERR(target->recv_cq);
+		goto err;
 	}
 
-	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);
+	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+				       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
+	if (IS_ERR(target->send_cq)) {
+		ret = PTR_ERR(target->send_cq);
+		goto err_recv_cq;
+	}
+
+	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
 
 	init_attr->event_handler = srp_qp_event;
 	init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -243,24 +251,32 @@ static int srp_create_target_ib(struct srp_target_port *target)
 	init_attr->cap.max_send_sge = 1;
 	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
 	init_attr->qp_type = IB_QPT_RC;
-	init_attr->send_cq = target->cq;
-	init_attr->recv_cq = target->cq;
+	init_attr->send_cq = target->send_cq;
+	init_attr->recv_cq = target->recv_cq;
 
 	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
 	if (IS_ERR(target->qp)) {
 		ret = PTR_ERR(target->qp);
-		ib_destroy_cq(target->cq);
-		goto out;
+		goto err_send_cq;
 	}
 
 	ret = srp_init_qp(target, target->qp);
-	if (ret) {
-		ib_destroy_qp(target->qp);
-		ib_destroy_cq(target->cq);
-		goto out;
-	}
+	if (ret)
+		goto err_qp;
 
-out:
+	kfree(init_attr);
+	return 0;
+
+err_qp:
+	ib_destroy_qp(target->qp);
+
+err_send_cq:
+	ib_destroy_cq(target->send_cq);
+
+err_recv_cq:
+	ib_destroy_cq(target->recv_cq);
+
+err:
 	kfree(init_attr);
 	return ret;
 }
@@ -270,7 +286,8 @@ static void srp_free_target_ib(struct srp_target_port *target)
 	int i;
 
 	ib_destroy_qp(target->qp);
-	ib_destroy_cq(target->cq);
+	ib_destroy_cq(target->send_cq);
+	ib_destroy_cq(target->recv_cq);
 
 	for (i = 0; i < SRP_RQ_SIZE; ++i)
 		srp_free_iu(target->srp_host, target->rx_ring[i]);
@@ -568,7 +585,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	if (ret)
 		goto err;
 
-	while (ib_poll_cq(target->cq, 1, &wc) > 0)
+	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
+		; /* nothing */
+	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
 		; /* nothing */
 
 	spin_lock_irq(target->scsi_host->host_lock);
@@ -851,7 +870,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 	struct srp_iu *iu;
 	u8 opcode;
 
-	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
+	iu = target->rx_ring[wc->wr_id];
 
 	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
@@ -898,7 +917,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 					   DMA_FROM_DEVICE);
 }
 
-static void srp_completion(struct ib_cq *cq, void *target_ptr)
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
 {
 	struct srp_target_port *target = target_ptr;
 	struct ib_wc wc;
@@ -907,17 +926,31 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		if (wc.status) {
 			shost_printk(KERN_ERR, target->scsi_host,
-				     PFX "failed %s status %d\n",
-				     wc.wr_id & SRP_OP_RECV ? "receive" : "send",
+				     PFX "failed receive status %d\n",
 				     wc.status);
 			target->qp_in_error = 1;
 			break;
 		}
 
-		if (wc.wr_id & SRP_OP_RECV)
-			srp_handle_recv(target, &wc);
-		else
-			++target->tx_tail;
+		srp_handle_recv(target, &wc);
+	}
+}
+
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
+{
+	struct srp_target_port *target = target_ptr;
+	struct ib_wc wc;
+
+	while (ib_poll_cq(cq, 1, &wc) > 0) {
+		if (wc.status) {
+			shost_printk(KERN_ERR, target->scsi_host,
+				     PFX "failed send status %d\n",
+				     wc.status);
+			target->qp_in_error = 1;
+			break;
+		}
+
+		++target->tx_tail;
	}
 }
 
@@ -930,7 +963,7 @@ static int __srp_post_recv(struct srp_target_port *target)
 	int ret;
 
 	next = target->rx_head & (SRP_RQ_SIZE - 1);
-	wr.wr_id = next | SRP_OP_RECV;
+	wr.wr_id = next;
 	iu = target->rx_ring[next];
 
 	list.addr = iu->dma;
@@ -970,6 +1003,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
 {
 	s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
 
+	srp_send_completion(target->send_cq, target);
+
 	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
 		return NULL;
 
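Note (not part of the patch itself): after the split, only the receive CQ is re-armed with ib_req_notify_cq(), so receive completions stay event driven, while send completions are reaped by polling the send CQ from __srp_get_tx_iu() just before checking for a free TX slot. The userspace sketch below only models that reclaim-on-allocate pattern under those assumptions; the names toy_cq, toy_target, reap_send_completions and get_tx_slot are invented for illustration and do not appear in the driver.

/* Minimal userspace model of the "poll send CQ before taking a TX slot"
 * pattern.  All names here are illustrative only, not driver code. */
#include <stdio.h>

#define TOY_SQ_SIZE 4

/* A "send completion queue" reduced to a count of pending completions. */
struct toy_cq {
	int pending;
};

struct toy_target {
	struct toy_cq send_cq;
	unsigned int tx_head;	/* sends posted so far  */
	unsigned int tx_tail;	/* sends reaped so far  */
};

/* Models srp_send_completion(): drain the send CQ, advancing tx_tail. */
static void reap_send_completions(struct toy_target *t)
{
	while (t->send_cq.pending > 0) {
		t->send_cq.pending--;
		t->tx_tail++;
	}
}

/* Models __srp_get_tx_iu(): reap send completions first, then check
 * whether the TX ring still has room. */
static int get_tx_slot(struct toy_target *t)
{
	reap_send_completions(t);

	if (t->tx_head - t->tx_tail >= TOY_SQ_SIZE)
		return -1;		/* ring full */

	return t->tx_head++ % TOY_SQ_SIZE;
}

int main(void)
{
	struct toy_target t = { { 0 }, 0, 0 };
	int i;

	for (i = 0; i < 6; i++) {
		printf("request %d -> slot %d\n", i, get_tx_slot(&t));
		if (i == 3)
			t.send_cq.pending = 4;	/* "hardware" completed 4 sends */
	}
	return 0;
}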