@@ -63,13 +63,13 @@ static void rds_iw_send_rdma_complete(struct rds_message *rm,
 }
 
 static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
-				   struct rds_rdma_op *op)
+				   struct rm_rdma_op *op)
 {
-	if (op->r_mapped) {
+	if (op->op_mapped) {
 		ib_dma_unmap_sg(ic->i_cm_id->device,
-			op->r_sg, op->r_nents,
-			op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		op->r_mapped = 0;
+			op->op_sg, op->op_nents,
+			op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		op->op_mapped = 0;
 	}
 }
 
@@ -85,8 +85,8 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 			rm->data.m_sg, rm->data.m_nents,
 			DMA_TO_DEVICE);
 
-	if (rm->rdma.m_rdma_op.r_active) {
-		rds_iw_send_unmap_rdma(ic, &rm->rdma.m_rdma_op);
+	if (rm->rdma.op_active) {
+		rds_iw_send_unmap_rdma(ic, &rm->rdma);
 
 		/* If the user asked for a completion notification on this
 		 * message, we can implement three different semantics:
@@ -110,10 +110,10 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
 		 */
 		rds_iw_send_rdma_complete(rm, wc_status);
 
-		if (rm->rdma.m_rdma_op.r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+		if (rm->rdma.op_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
 	}
 
 	/* If anyone waited for this message to get flushed out, wake
@@ -591,10 +591,10 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 		/* If it has a RDMA op, tell the peer we did it. This is
 		 * used by the peer to release use-once RDMA MRs. */
-		if (rm->rdma.m_rdma_op.r_active) {
+		if (rm->rdma.op_active) {
 			struct rds_ext_header_rdma ext_hdr;
 
-			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
+			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
 			rds_message_add_extension(&rm->m_inc.i_hdr,
 					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 		}
@@ -632,7 +632,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
+	if (rm->rdma.op_active && rm->rdma.op_fence)
 		send_flags = IB_SEND_FENCE;
 
 	/*
@@ -785,7 +785,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
 		ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
 }
 
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
 {
 	struct rds_iw_connection *ic = conn->c_transport_data;
 	struct rds_iw_send_work *send = NULL;
@@ -795,7 +795,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	struct rds_iw_device *rds_iwdev;
 	struct scatterlist *scat;
 	unsigned long len;
-	u64 remote_addr = op->r_remote_addr;
+	u64 remote_addr = op->op_remote_addr;
 	u32 pos, fr_pos;
 	u32 work_alloc;
 	u32 i;
@@ -807,21 +807,21 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
 
 	/* map the message the first time we see it */
-	if (!op->r_mapped) {
-		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
-					op->r_sg, op->r_nents, (op->r_write) ?
-					DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
-		if (op->r_count == 0) {
+	if (!op->op_mapped) {
+		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+					op->op_sg, op->op_nents, (op->op_write) ?
+					DMA_TO_DEVICE : DMA_FROM_DEVICE);
+		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+		if (op->op_count == 0) {
 			rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
 			ret = -ENOMEM; /* XXX ? */
 			goto out;
 		}
 
-		op->r_mapped = 1;
+		op->op_mapped = 1;
 	}
 
-	if (!op->r_write) {
+	if (!op->op_write) {
 		/* Alloc space on the send queue for the fastreg */
 		work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
 		if (work_alloc != 1) {
@@ -836,7 +836,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * Instead of knowing how to return a partial rdma read/write we insist that there
 	 * be enough work requests to send the entire message.
 	 */
-	i = ceil(op->r_count, rds_iwdev->max_sge);
+	i = ceil(op->op_count, rds_iwdev->max_sge);
 
 	work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
 	if (work_alloc != i) {
@@ -847,17 +847,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}
 
 	send = &ic->i_sends[pos];
-	if (!op->r_write) {
+	if (!op->op_write) {
 		first = prev = &ic->i_sends[fr_pos];
 	} else {
 		first = send;
 		prev = NULL;
 	}
-	scat = &op->r_sg[0];
+	scat = &op->op_sg[0];
 	sent = 0;
-	num_sge = op->r_count;
+	num_sge = op->op_count;
 
-	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
 		send->s_wr.send_flags = 0;
 		send->s_queued = jiffies;
 
@@ -874,13 +874,13 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		 * for local access after RDS is finished with it, using
 		 * IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
 		 */
-		if (op->r_write)
+		if (op->op_write)
 			send->s_wr.opcode = IB_WR_RDMA_WRITE;
 		else
 			send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
 
 		send->s_wr.wr.rdma.remote_addr = remote_addr;
-		send->s_wr.wr.rdma.rkey = op->r_key;
+		send->s_wr.wr.rdma.rkey = op->op_rkey;
 		send->s_op = op;
 
 		if (num_sge > rds_iwdev->max_sge) {
@@ -894,7 +894,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 		if (prev)
 			prev->s_wr.next = &send->s_wr;
 
-		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
 			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
 
 			if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
@@ -928,7 +928,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	}
 
 	/* if we finished the message then send completion owns it */
-	if (scat == &op->r_sg[op->r_count])
+	if (scat == &op->op_sg[op->op_count])
 		first->s_wr.send_flags = IB_SEND_SIGNALED;
 
 	if (i < work_alloc) {
@@ -942,9 +942,9 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
 	 * adapters do not allow using the lkey for this at all. To bypass this use a
 	 * fastreg_mr (or possibly a dma_mr)
 	 */
-	if (!op->r_write) {
+	if (!op->op_write) {
 		rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
-		       op->r_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
+		       op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
 		work_alloc++;
 	}
 
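At a glance the patch is a mechanical rename: the per-message RDMA op becomes the rm->rdma member of struct rds_message (a struct rm_rdma_op), and its r_* fields become op_*. The sketch below is a hypothetical, userspace-compilable reduction, not the kernel's actual struct rm_rdma_op or ib_dma_unmap_sg(); it only collects the old-to-new field names that appear in the hunks above (with simplified types) and mirrors the unmap path from the first hunk. fake_dma_unmap_sg() and sketch_unmap_rdma() are made-up stand-ins.

/*
 * Hypothetical sketch of the rds_rdma_op -> rm_rdma_op rename.
 * Field types are simplified; only fields touched by this patch are shown.
 */
#include <stdint.h>
#include <stdio.h>

struct scatterlist;				/* opaque here */

struct rm_rdma_op {				/* was: struct rds_rdma_op */
	uint32_t		op_rkey;	/* was r_key */
	uint64_t		op_remote_addr;	/* was r_remote_addr */
	unsigned int		op_write:1;	/* was r_write */
	unsigned int		op_fence:1;	/* was r_fence */
	unsigned int		op_active:1;	/* was r_active */
	unsigned int		op_mapped:1;	/* was r_mapped */
	unsigned int		op_nents;	/* was r_nents */
	unsigned int		op_count;	/* was r_count */
	unsigned int		op_bytes;	/* was r_bytes */
	struct scatterlist	*op_sg;		/* was r_sg */
};

/* stand-in for ib_dma_unmap_sg(); device handle and DMA enums elided */
static void fake_dma_unmap_sg(struct scatterlist *sg, unsigned int nents, int to_device)
{
	printf("unmap %u entries, %s\n", nents,
	       to_device ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
	(void)sg;
}

/* mirrors the renamed field accesses in rds_iw_send_unmap_rdma() */
static void sketch_unmap_rdma(struct rm_rdma_op *op)
{
	if (op->op_mapped) {
		fake_dma_unmap_sg(op->op_sg, op->op_nents, op->op_write);
		op->op_mapped = 0;
	}
}

int main(void)
{
	struct rm_rdma_op op = { .op_mapped = 1, .op_write = 1, .op_nents = 4 };

	sketch_unmap_rdma(&op);
	return 0;
}

Built with a plain cc invocation, the program just prints the unmap that the first hunk would perform for a mapped write op; its only purpose is to show the field-name mapping in one place.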