@@ -128,7 +128,8 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 		page_bytes -= sge_bytes;
 
 		frmr->page_list->page_list[page_no] =
-			ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
+			ib_dma_map_single(xprt->sc_cm_id->device,
+					  page_address(page),
 					PAGE_SIZE, DMA_TO_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 frmr->page_list->page_list[page_no]))
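
The hunk above swaps ib_dma_map_page() for ib_dma_map_single() when
mapping the FRMR page list. For a page that has a kernel linear
mapping, ib_dma_map_single() on page_address(page) covers the same
PAGE_SIZE region that ib_dma_map_page(dev, page, 0, PAGE_SIZE, dir)
did; note page_address() is only meaningful for such pages. A minimal
sketch of the substitution follows, using a hypothetical helper whose
name and "dev"/"page" parameters are stand-ins, not from the patch:

#include <linux/mm.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper, not part of the patch: map one lowmem page
 * for a send, the way the new fast_reg_xdr() code does it. */
static u64 map_xdr_page(struct ib_device *dev, struct page *page)
{
	/* Equivalent old form:
	 *   ib_dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	 */
	return ib_dma_map_single(dev, page_address(page),
				 PAGE_SIZE, DMA_TO_DEVICE);
}

The caller still checks the result with ib_dma_mapping_error(), as the
unchanged context lines in the hunk show.
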
@@ -532,18 +533,17 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
 
 	/* Prepare the SGE for the RPCRDMA Header */
+	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
 	ctxt->sge[0].addr =
-		ib_dma_map_page(rdma->sc_cm_id->device,
-				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+		ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
+				  ctxt->sge[0].length, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
 		goto err;
 	atomic_inc(&rdma->sc_dma_used);
 
 	ctxt->direction = DMA_TO_DEVICE;
 
-	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
-	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
-
 	/* Determine how many of our SGE are to be transmitted */
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
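
In the second hunk the lkey and length assignments move above the map
call because the mapping now covers only the RPC/RDMA reply header
(svc_rdma_xdr_get_reply_hdr_len(rdma_resp) bytes) rather than a full
PAGE_SIZE, so ctxt->sge[0].length must be set before it is passed to
ib_dma_map_single(). A minimal sketch of that ordering constraint,
using a hypothetical helper in which every name other than the ib_*
calls and struct ib_sge is a stand-in:

#include <rdma/ib_verbs.h>

/* Hypothetical helper, not part of the patch: prepare and map a
 * header SGE whose mapping length equals the header length. */
static int map_hdr_sge(struct ib_device *dev, struct ib_sge *sge,
		       void *hdr, u32 hdr_len, u32 lkey)
{
	sge->lkey = lkey;
	sge->length = hdr_len;	/* must be set before the map call */
	sge->addr = ib_dma_map_single(dev, hdr, sge->length,
				      DMA_TO_DEVICE);
	return ib_dma_mapping_error(dev, sge->addr);
}

Mapping only the header bytes avoids covering a whole page for a
short header and keeps the SGE length consistent with the region that
was actually mapped.
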