svc_rdma_sendto.c

/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

/* Encode an XDR as an array of IB SGE
 *
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * SGE[0]              reserved for RPCRDMA header
 * SGE[1]              data from xdr->head[]
 * SGE[2..sge_count-2] data from xdr->pages[]
 * SGE[sge_count-1]    data from xdr->tail.
 *
 * The max SGE we need is the length of the XDR / pagesize + one for
 * head + one for tail + one for RPCRDMA header. Since RPCSVC_MAXPAGES
 * reserves a page for both the request and the reply header, and this
 * array is only concerned with the reply, we are assured that we have
 * one extra page for the RPCRDMA header.
 */
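/*
 * fast_reg_xdr() maps the reply xdr_buf for use with a fast-register
 * MR (FRMR): the head, the pages[] array and (if present) the tail are
 * DMA-mapped into frmr->page_list, the FRMR is registered with
 * svc_rdma_fastreg(), and vec->sge[]/vec->count are filled in so the
 * whole reply can be referenced through the FRMR's single lkey.
 *
 * Returns 0 on success, -ENOMEM if no FRMR is available, or -EIO if
 * DMA mapping or fast registration fails.
 */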
static int fast_reg_xdr(struct svcxprt_rdma *xprt,
                        struct xdr_buf *xdr,
                        struct svc_rdma_req_map *vec)
{
        int sge_no;
        u32 sge_bytes;
        u32 page_bytes;
        u32 page_off;
        int page_no = 0;
        u8 *frva;
        struct svc_rdma_fastreg_mr *frmr;

        frmr = svc_rdma_get_frmr(xprt);
        if (IS_ERR(frmr))
                return -ENOMEM;
        vec->frmr = frmr;

        /* Skip the RPCRDMA header */
        sge_no = 1;

        /* Map the head. */
        frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK);
        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
        vec->count = 2;
        sge_no++;

        /* Build the FRMR */
        frmr->kva = frva;
        frmr->direction = DMA_TO_DEVICE;
        frmr->access_flags = 0;
        frmr->map_len = PAGE_SIZE;
        frmr->page_list_len = 1;
        frmr->page_list->page_list[page_no] =
                ib_dma_map_single(xprt->sc_cm_id->device,
                                  (void *)xdr->head[0].iov_base,
                                  PAGE_SIZE, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                 frmr->page_list->page_list[page_no]))
                goto fatal_err;
        atomic_inc(&xprt->sc_dma_used);

        page_off = xdr->page_base;
        page_bytes = xdr->page_len + page_off;
        if (!page_bytes)
                goto encode_tail;

        /* Map the pages */
        vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
        vec->sge[sge_no].iov_len = page_bytes;
        sge_no++;
        while (page_bytes) {
                struct page *page;

                page = xdr->pages[page_no++];
                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                page_bytes -= sge_bytes;

                frmr->page_list->page_list[page_no] =
                        ib_dma_map_single(xprt->sc_cm_id->device,
                                          page_address(page),
                                          PAGE_SIZE, DMA_TO_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         frmr->page_list->page_list[page_no]))
                        goto fatal_err;
                atomic_inc(&xprt->sc_dma_used);

                page_off = 0; /* reset for next time through loop */
                frmr->map_len += PAGE_SIZE;
                frmr->page_list_len++;
        }
        vec->count++;

 encode_tail:
        /* Map tail */
        if (0 == xdr->tail[0].iov_len)
                goto done;

        vec->count++;
        vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;

        if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) ==
            ((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) {
                /*
                 * If head and tail use the same page, we don't need
                 * to map it again.
                 */
                vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
        } else {
                void *va;

                /* Map another page for the tail */
                page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
                va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK);
                vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;

                frmr->page_list->page_list[page_no] =
                        ib_dma_map_single(xprt->sc_cm_id->device, va, PAGE_SIZE,
                                          DMA_TO_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         frmr->page_list->page_list[page_no]))
                        goto fatal_err;
                atomic_inc(&xprt->sc_dma_used);
                frmr->map_len += PAGE_SIZE;
                frmr->page_list_len++;
        }

 done:
        if (svc_rdma_fastreg(xprt, frmr))
                goto fatal_err;

        return 0;

 fatal_err:
        printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
        vec->frmr = NULL;
        svc_rdma_put_frmr(xprt, frmr);
        return -EIO;
}

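/*
 * map_xdr() builds vec->sge[] as a plain kvec view of the reply:
 * sge[0] is left for the RPCRDMA header, followed by one entry for the
 * head, one per page of the pagelist, and one for the tail.  When the
 * transport supports fast registration (sc_frmr_pg_list_len != 0), the
 * work is handed off to fast_reg_xdr() instead.  No DMA mapping is
 * done here; that happens later in send_write() and send_reply().
 */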
static int map_xdr(struct svcxprt_rdma *xprt,
                   struct xdr_buf *xdr,
                   struct svc_rdma_req_map *vec)
{
        int sge_no;
        u32 sge_bytes;
        u32 page_bytes;
        u32 page_off;
        int page_no;

        BUG_ON(xdr->len !=
               (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));

        if (xprt->sc_frmr_pg_list_len)
                return fast_reg_xdr(xprt, xdr, vec);

        /* Skip the first sge, this is for the RPCRDMA header */
        sge_no = 1;

        /* Head SGE */
        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
        sge_no++;

        /* pages SGE */
        page_no = 0;
        page_bytes = xdr->page_len;
        page_off = xdr->page_base;
        while (page_bytes) {
                vec->sge[sge_no].iov_base =
                        page_address(xdr->pages[page_no]) + page_off;
                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                page_bytes -= sge_bytes;
                vec->sge[sge_no].iov_len = sge_bytes;

                sge_no++;
                page_no++;
                page_off = 0; /* reset for next time through loop */
        }

        /* Tail SGE */
        if (xdr->tail[0].iov_len) {
                vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
                vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
                sge_no++;
        }

        dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
                "page_base %u page_len %u head_len %zu tail_len %zu\n",
                sge_no, page_no, xdr->page_base, xdr->page_len,
                xdr->head[0].iov_len, xdr->tail[0].iov_len);

        vec->count = sge_no;
        return 0;
}

/* Assumptions:
 * - We are using FRMR
 *     - or -
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
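/*
 * send_write() posts a single RDMA_WRITE work request: 'rmr' and 'to'
 * identify the client-side chunk (rkey and remote address), 'xdr_off'
 * is the byte offset into the reply xdr_buf (as laid out in 'vec') at
 * which the payload starts, and 'write_len' is the number of bytes to
 * push.  Returns 0 on success or -EIO on a mapping or post failure,
 * in which case the caller is expected to close the transport.
 */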
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                      u32 rmr, u64 to,
                      u32 xdr_off, int write_len,
                      struct svc_rdma_req_map *vec)
{
        struct ib_send_wr write_wr;
        struct ib_sge *sge;
        int xdr_sge_no;
        int sge_no;
        int sge_bytes;
        int sge_off;
        int bc;
        struct svc_rdma_op_ctxt *ctxt;

        BUG_ON(vec->count > RPCSVC_MAXPAGES);
        dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
                "write_len=%d, vec->sge=%p, vec->count=%lu\n",
                rmr, (unsigned long long)to, xdr_off,
                write_len, vec->sge, vec->count);

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_TO_DEVICE;
        sge = ctxt->sge;

        /* Find the SGE associated with xdr_off */
        for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
             xdr_sge_no++) {
                if (vec->sge[xdr_sge_no].iov_len > bc)
                        break;
                bc -= vec->sge[xdr_sge_no].iov_len;
        }

        sge_off = bc;
        bc = write_len;
        sge_no = 0;

        /* Copy the remaining SGE */
        while (bc != 0) {
                sge_bytes = min_t(size_t,
                          bc, vec->sge[xdr_sge_no].iov_len-sge_off);
                sge[sge_no].length = sge_bytes;
                if (!vec->frmr) {
                        sge[sge_no].addr =
                                ib_dma_map_single(xprt->sc_cm_id->device,
                                                  (void *)
                                                  vec->sge[xdr_sge_no].iov_base + sge_off,
                                                  sge_bytes, DMA_TO_DEVICE);
                        if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                                 sge[sge_no].addr))
                                goto err;
                        atomic_inc(&xprt->sc_dma_used);
                        sge[sge_no].lkey = xprt->sc_dma_lkey;
                } else {
                        sge[sge_no].addr = (unsigned long)
                                vec->sge[xdr_sge_no].iov_base + sge_off;
                        sge[sge_no].lkey = vec->frmr->mr->lkey;
                }
                ctxt->count++;
                ctxt->frmr = vec->frmr;
                sge_off = 0;
                sge_no++;
                xdr_sge_no++;
                BUG_ON(xdr_sge_no > vec->count);
                bc -= sge_bytes;
        }

        /* Prepare WRITE WR */
        memset(&write_wr, 0, sizeof write_wr);
        ctxt->wr_op = IB_WR_RDMA_WRITE;
        write_wr.wr_id = (unsigned long)ctxt;
        write_wr.sg_list = &sge[0];
        write_wr.num_sge = sge_no;
        write_wr.opcode = IB_WR_RDMA_WRITE;
        write_wr.send_flags = IB_SEND_SIGNALED;
        write_wr.wr.rdma.rkey = rmr;
        write_wr.wr.rdma.remote_addr = to;

        /* Post It */
        atomic_inc(&rdma_stat_write);
        if (svc_rdma_send(xprt, &write_wr))
                goto err;
        return 0;
 err:
        svc_rdma_put_context(ctxt, 0);
        /* Fatal error, close transport */
        return -EIO;
}

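/*
 * send_write_chunks() walks the write list supplied by the client in
 * the request header (rdma_argp), RDMA-writes the pagelist and tail of
 * rq_res into those client chunks via send_write(), and encodes the
 * corresponding write list in the reply header (rdma_resp).  On
 * success it returns rq_res.page_len plus the tail length, i.e. the
 * bytes the caller should no longer send inline; it returns 0 if the
 * request carried no write list, or -EIO if an RDMA_WRITE fails.
 */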
static int send_write_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_msg *rdma_argp,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
        int write_len;
        int max_write;
        u32 xdr_off;
        int chunk_off;
        int chunk_no;
        struct rpcrdma_write_array *arg_ary;
        struct rpcrdma_write_array *res_ary;
        int ret;

        arg_ary = svc_rdma_get_write_array(rdma_argp);
        if (!arg_ary)
                return 0;
        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[1];

        if (vec->frmr)
                max_write = vec->frmr->map_len;
        else
                max_write = xprt->sc_max_sge * PAGE_SIZE;

        /* Write chunks start at the pagelist */
        for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
             xfer_len && chunk_no < arg_ary->wc_nchunks;
             chunk_no++) {
                struct rpcrdma_segment *arg_ch;
                u64 rs_offset;

                arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, arg_ch->rs_length);

                /* Prepare the response chunk given the length actually
                 * written */
                rs_offset = get_unaligned(&(arg_ch->rs_offset));
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                                arg_ch->rs_handle,
                                                rs_offset,
                                                write_len);
                chunk_off = 0;
                while (write_len) {
                        int this_write;

                        this_write = min(write_len, max_write);
                        ret = send_write(xprt, rqstp,
                                         arg_ch->rs_handle,
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         this_write,
                                         vec);
                        if (ret) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += this_write;
                        xdr_off += this_write;
                        xfer_len -= this_write;
                        write_len -= this_write;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

        return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}

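/*
 * send_reply_chunks() does the same job for a reply list: the entire
 * RPC reply (starting at xdr offset 0) is RDMA-written into the reply
 * chunks the client provided, and the reply array is encoded into the
 * reply header.  On success it returns rq_res.len so that the caller
 * sends only the RPCRDMA header inline (the RDMA_NOMSG case); it
 * returns 0 if there is no reply list, or -EIO if an RDMA_WRITE fails.
 */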
static int send_reply_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_msg *rdma_argp,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.len;
        int write_len;
        int max_write;
        u32 xdr_off;
        int chunk_no;
        int chunk_off;
        struct rpcrdma_segment *ch;
        struct rpcrdma_write_array *arg_ary;
        struct rpcrdma_write_array *res_ary;
        int ret;

        arg_ary = svc_rdma_get_reply_array(rdma_argp);
        if (!arg_ary)
                return 0;
        /* XXX: need to fix when reply lists occur with read-list and or
         * write-list */
        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[2];

        if (vec->frmr)
                max_write = vec->frmr->map_len;
        else
                max_write = xprt->sc_max_sge * PAGE_SIZE;

        /* xdr offset starts at RPC message */
        for (xdr_off = 0, chunk_no = 0;
             xfer_len && chunk_no < arg_ary->wc_nchunks;
             chunk_no++) {
                u64 rs_offset;
                ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, ch->rs_length);

                /* Prepare the reply chunk given the length actually
                 * written */
                rs_offset = get_unaligned(&(ch->rs_offset));
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                                ch->rs_handle, rs_offset,
                                                write_len);
                chunk_off = 0;
                while (write_len) {
                        int this_write;

                        this_write = min(write_len, max_write);
                        ret = send_write(xprt, rqstp,
                                         ch->rs_handle,
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         this_write,
                                         vec);
                        if (ret) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += this_write;
                        xdr_off += this_write;
                        xfer_len -= this_write;
                        write_len -= this_write;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

        return rqstp->rq_res.len;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0], the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND.
 */
static int send_reply(struct svcxprt_rdma *rdma,
                      struct svc_rqst *rqstp,
                      struct page *page,
                      struct rpcrdma_msg *rdma_resp,
                      struct svc_rdma_op_ctxt *ctxt,
                      struct svc_rdma_req_map *vec,
                      int byte_count)
{
        struct ib_send_wr send_wr;
        struct ib_send_wr inv_wr;
        int sge_no;
        int sge_bytes;
        int page_no;
        int ret;

        /* Post a recv buffer to handle another request. */
        ret = svc_rdma_post_recv(rdma);
        if (ret) {
                printk(KERN_INFO
                       "svcrdma: could not post a receive buffer, err=%d."
                       "Closing transport %p.\n", ret, rdma);
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                svc_rdma_put_frmr(rdma, vec->frmr);
                svc_rdma_put_context(ctxt, 0);
                return -ENOTCONN;
        }

        /* Prepare the context */
        ctxt->pages[0] = page;
        ctxt->count = 1;
        ctxt->frmr = vec->frmr;
        if (vec->frmr)
                set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
        else
                clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

        /* Prepare the SGE for the RPCRDMA Header */
        ctxt->sge[0].lkey = rdma->sc_dma_lkey;
        ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
        ctxt->sge[0].addr =
                ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
                                  ctxt->sge[0].length, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
                goto err;
        atomic_inc(&rdma->sc_dma_used);

        ctxt->direction = DMA_TO_DEVICE;

        /* Determine how many of our SGE are to be transmitted */
        for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
                sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
                byte_count -= sge_bytes;
                if (!vec->frmr) {
                        ctxt->sge[sge_no].addr =
                                ib_dma_map_single(rdma->sc_cm_id->device,
                                                  vec->sge[sge_no].iov_base,
                                                  sge_bytes, DMA_TO_DEVICE);
                        if (ib_dma_mapping_error(rdma->sc_cm_id->device,
                                                 ctxt->sge[sge_no].addr))
                                goto err;
                        atomic_inc(&rdma->sc_dma_used);
                        ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
                } else {
                        ctxt->sge[sge_no].addr = (unsigned long)
                                vec->sge[sge_no].iov_base;
                        ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
                }
                ctxt->sge[sge_no].length = sge_bytes;
        }
        BUG_ON(byte_count != 0);

        /* Save all respages in the ctxt and remove them from the
         * respages array. They are our pages until the I/O
         * completes.
         */
        for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
                ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
                ctxt->count++;
                rqstp->rq_respages[page_no] = NULL;
                /*
                 * If there are more pages than SGE, terminate SGE
                 * list so that svc_rdma_unmap_dma doesn't attempt to
                 * unmap garbage.
                 */
                if (page_no+1 >= sge_no)
                        ctxt->sge[page_no+1].length = 0;
        }
        BUG_ON(sge_no > rdma->sc_max_sge);
        memset(&send_wr, 0, sizeof send_wr);
        ctxt->wr_op = IB_WR_SEND;
        send_wr.wr_id = (unsigned long)ctxt;
        send_wr.sg_list = ctxt->sge;
        send_wr.num_sge = sge_no;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;
        if (vec->frmr) {
                /* Prepare INVALIDATE WR */
                memset(&inv_wr, 0, sizeof inv_wr);
                inv_wr.opcode = IB_WR_LOCAL_INV;
                inv_wr.send_flags = IB_SEND_SIGNALED;
                inv_wr.ex.invalidate_rkey =
                        vec->frmr->mr->lkey;
                send_wr.next = &inv_wr;
        }

        ret = svc_rdma_send(rdma, &send_wr);
        if (ret)
                goto err;

        return 0;

 err:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_frmr(rdma, vec->frmr);
        svc_rdma_put_context(ctxt, 1);
        return -EIO;
}

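/*
 * Nothing to do here: the RPCRDMA reply header is built later, in
 * svc_rdma_sendto(), so this transport hook is a no-op.
 */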
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/*
 * Return the start of an xdr buffer.
 */
static void *xdr_start(struct xdr_buf *xdr)
{
        return xdr->head[0].iov_base -
                (xdr->len -
                 xdr->page_len -
                 xdr->tail[0].iov_len -
                 xdr->head[0].iov_len);
}

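/*
 * svc_rdma_sendto() is the transport's sendto entry point.  It maps
 * rq_res into an SGE vector, builds the RPCRDMA reply header
 * (RDMA_NOMSG if the client supplied a reply array, RDMA_MSG
 * otherwise), pushes any write-chunk and reply-chunk data with
 * RDMA_WRITEs, and finally posts the RDMA_SEND carrying the header
 * plus whatever bytes remain inline.
 */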
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct rpcrdma_msg *rdma_argp;
        struct rpcrdma_msg *rdma_resp;
        struct rpcrdma_write_array *reply_ary;
        enum rpcrdma_proc reply_type;
        int ret;
        int inline_bytes;
        struct page *res_page;
        struct svc_rdma_op_ctxt *ctxt;
        struct svc_rdma_req_map *vec;

        dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

        /* Get the RDMA request header. */
        rdma_argp = xdr_start(&rqstp->rq_arg);

        /* Build a req vec for the XDR */
        ctxt = svc_rdma_get_context(rdma);
        ctxt->direction = DMA_TO_DEVICE;
        vec = svc_rdma_get_req_map();
        ret = map_xdr(rdma, &rqstp->rq_res, vec);
        if (ret)
                goto err0;
        inline_bytes = rqstp->rq_res.len;

        /* Create the RDMA response header */
        res_page = svc_rdma_get_page();
        rdma_resp = page_address(res_page);
        reply_ary = svc_rdma_get_reply_array(rdma_argp);
        if (reply_ary)
                reply_type = RDMA_NOMSG;
        else
                reply_type = RDMA_MSG;
        svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
                                         rdma_resp, reply_type);

        /* Send any write-chunk data and build resp write-list */
        ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        if (ret < 0) {
                printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
                       ret);
                goto err1;
        }
        inline_bytes -= ret;

        /* Send any reply-list data and update resp reply-list */
        ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        if (ret < 0) {
                printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
                       ret);
                goto err1;
        }
        inline_bytes -= ret;

        ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
                         inline_bytes);
        svc_rdma_put_req_map(vec);
        dprintk("svcrdma: send_reply returns %d\n", ret);
        return ret;

 err1:
        put_page(res_page);
 err0:
        svc_rdma_put_req_map(vec);
        svc_rdma_put_context(ctxt, 0);
        return ret;
}