svc_rdma_transport.c

/*
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

/* WR context cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_ctxt_cachep;
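
/*
 * Allocate a WR context for an I/O operation on @xprt. Contexts come
 * from a global kmem cache; if an allocation fails, sleep for half a
 * second and retry rather than failing the operation. Each context is
 * charged against the transport's sc_ctxt_used count, which is checked
 * for leaks when the transport is destroyed.
 */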
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
		if (ctxt)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	ctxt->frmr = NULL;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}
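
/*
 * Unmap the DMA mappings held in a context's SGE list before the
 * context is reused or freed.
 */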
void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_single(xprt->sc_cm_id->device,
					    ctxt->sge[i].addr,
					    ctxt->sge[i].length,
					    ctxt->direction);
		}
	}
}
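
/*
 * Return a WR context to the cache. If @free_pages is set, the pages
 * attached to the context are released as well. This drops the
 * sc_ctxt_used count taken in svc_rdma_get_context.
 */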
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	BUG_ON(!ctxt);
	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}

/* Temporary NFS request map cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_map_cachep;

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;

	while (1) {
		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
		if (map)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	map->count = 0;
	map->frmr = NULL;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	dprintk("svcrdma: received CQ event id=%d, context=%p\n",
		event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %d received for QP=%p\n",
			event->event, event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
			"closing transport\n",
			event->event, event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);
		svc_xprt_put(&xprt->sc_xprt);

		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n",
				ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
			    struct svc_rdma_op_ctxt *ctxt)
{
	svc_rdma_unmap_dma(ctxt);

	switch (ctxt->wr_op) {
	case IB_WR_SEND:
		if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
			svc_rdma_put_frmr(xprt, ctxt->frmr);
		svc_rdma_put_context(ctxt, 1);
		break;

	case IB_WR_RDMA_WRITE:
		svc_rdma_put_context(ctxt, 0);
		break;

	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
			BUG_ON(!read_hdr);
			if (test_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags))
				svc_rdma_put_frmr(xprt, ctxt->frmr);
			spin_lock_bh(&xprt->sc_rq_dto_lock);
			set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
			list_add_tail(&read_hdr->dto_q,
				      &xprt->sc_read_complete_q);
			spin_unlock_bh(&xprt->sc_rq_dto_lock);
			svc_xprt_enqueue(&xprt->sc_xprt);
		}
		svc_rdma_put_context(ctxt, 0);
		break;

	default:
		printk(KERN_ERR "svcrdma: unexpected completion type, "
		       "opcode=%d\n",
		       ctxt->wr_op);
		break;
	}
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			/* Close the transport */
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

		/* Decrement used SQ WR count */
		atomic_dec(&xprt->sc_sq_count);
		wake_up(&xprt->sc_send_wait);

		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		if (ctxt)
			process_context(xprt, ctxt);

		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}
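
/*
 * Allocate and initialize an svcxprt_rdma: list heads, locks, the send
 * wait queue, and the tunable defaults (ORD, maximum request size,
 * maximum request count, SQ depth). If @listener is set, the transport
 * is marked as a listening endpoint.
 */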
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);

	cma_xprt->sc_ord = svcrdma_ord;
	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

struct page *svc_rdma_get_page(void)
{
	struct page *page;

	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
		/* If we can't get memory, wait a bit and try again */
		printk(KERN_INFO "svcrdma: out of memory...retrying in 1000 "
		       "ms.\n");
		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
	}
	return page;
}
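
/*
 * Post a receive WR on the transport's QP. Enough pages are allocated
 * and DMA-mapped to cover sc_max_req_size, and the context is attached
 * to the WR via wr_id so it can be recovered when the receive
 * completes. A transport reference is held for the posted WR and
 * dropped again if the post fails.
 */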
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		BUG_ON(sge_no >= xprt->sc_max_sge);
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_single(xprt->sc_cm_id->device,
				       page_address(page), PAGE_SIZE,
				       DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		/* Track mapped SGEs as we go so the error path below
		 * can unmap everything set up so far. */
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

 err_put_ctxt:
	/* Unmap and free whatever was set up before the mapping failed */
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	/*
	 * Can't use svc_xprt_received here because we are not on a
	 * rqstp thread
	 */
	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, cma_id->context, event->event);
		handle_connect_req(cma_id,
				   event->param.conn.initiator_depth);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt)
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}

	return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event=%d\n", cma_id, xprt, event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event=%d\n", cma_id, event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	struct svc_xprt *xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if (sa->sa_family != AF_INET) {
		dprintk("svcrdma: Address family %d is not supported.\n",
			sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}

	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	xprt = &cma_xprt->sc_xprt;

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}
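
/*
 * Allocate a fast-register MR (FRMR) and its page list, each sized for
 * RPCSVC_MAXPAGES pages. Returns ERR_PTR(-ENOMEM) if any allocation
 * fails.
 */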
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *pl;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
	if (IS_ERR(mr))
		goto err_free_frmr;

	pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
					 RPCSVC_MAXPAGES);
	if (IS_ERR(pl))
		goto err_free_mr;

	frmr->mr = mr;
	frmr->page_list = pl;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

 err_free_mr:
	ib_dereg_mr(mr);
 err_free_frmr:
	kfree(frmr);
 err:
	return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		ib_dereg_mr(frmr->mr);
		ib_free_fast_reg_page_list(frmr->page_list);
		kfree(frmr);
	}
}
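
/*
 * Take an FRMR off the transport's free list, or allocate a fresh one
 * if the list is empty. The returned FRMR has an empty mapping
 * (map_len and page_list_len are zero).
 */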
struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock_bh(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->map_len = 0;
		frmr->page_list_len = 0;
	}
	spin_unlock_bh(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}

static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
			   struct svc_rdma_fastreg_mr *frmr)
{
	int page_no;

	for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
		dma_addr_t addr = frmr->page_list->page_list[page_no];

		if (ib_dma_mapping_error(frmr->mr->device, addr))
			continue;
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_single(frmr->mr->device, addr, PAGE_SIZE,
				    frmr->direction);
	}
}
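
/*
 * Return an FRMR to the transport's free list after unmapping any DMA
 * addresses remaining in its page list. The FRMR must not be on any
 * list when this is called.
 */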
void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		frmr_unmap_dma(rdma, frmr);
		spin_lock_bh(&rdma->sc_frmr_q_lock);
		BUG_ON(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock_bh(&rdma->sc_frmr_q_lock);
	}
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	int uninitialized_var(dma_mr_acc);
	int need_dma_mr;
	int ret;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
	if (ret) {
		dprintk("svcrdma: could not query device attributes on "
			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
		goto errout;
	}

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_sq_depth,
					 0);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_max_requests,
					 0);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		" cm_id->device=%p, sc_pd->device=%p\n"
		" cap.max_send_wr = %d\n"
		" cap.max_recv_wr = %d\n"
		" cap.max_send_sge = %d\n"
		" cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		/*
		 * XXX: This is a hack. We need a xx_request_qp interface
		 * that will adjust the qp_attr's with a best-effort
		 * number
		 */
		qp_attr.cap.max_send_sge -= 2;
		qp_attr.cap.max_recv_sge -= 2;
		ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
				     &qp_attr);
		if (ret) {
			dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
			goto errout;
		}
		/*
		 * Record the granted resources. sc_max_sge bounds both
		 * directions, so take the smaller of the send and recv
		 * SGE limits rather than letting the second assignment
		 * silently overwrite the first.
		 */
		newxprt->sc_max_sge = min(qp_attr.cap.max_send_sge,
					  qp_attr.cap.max_recv_sge);
		newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
		newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. Here's the table implemented below:
	 *
	 *		Fast	Global	DMA	Remote WR
	 *		Reg	LKEY	MR	Access
	 *		Sup'd	Sup'd	Needed	Needed
	 *
	 * IWARP	N	N	Y	Y
	 *		N	Y	Y	Y
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * IB		N	N	Y	N
	 *		N	Y	N	-
	 *		Y	N	Y	N
	 *		Y	Y	N	-
	 *
	 * NB:	iWARP requires remote write access for the data sink
	 *	of an RDMA_READ. IB does not.
	 */
	if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			devattr.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
	}

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	switch (rdma_node_get_transport(newxprt->sc_cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IWARP:
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
		if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG)) {
			need_dma_mr = 1;
			dma_mr_acc =
				(IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE);
		} else if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	case RDMA_TRANSPORT_IB:
		if (!(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
			need_dma_mr = 1;
			dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		} else
			need_dma_mr = 0;
		break;
	default:
		goto errout;
	}

	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
	if (need_dma_mr) {
		/* Register all of physical memory */
		newxprt->sc_phys_mr =
			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
		if (IS_ERR(newxprt->sc_phys_mr)) {
			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
				ret);
			goto errout;
		}
		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
	} else
		newxprt->sc_dma_lkey =
			newxprt->sc_cm_id->device->local_dma_lkey;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		" local_ip : %pI4\n"
		" local_port : %d\n"
		" remote_ip : %pI4\n"
		" remote_port : %d\n"
		" max_sge : %d\n"
		" sq_depth : %d\n"
		" max_requests : %d\n"
		" ord : %d\n",
		newxprt,
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
		  route.addr.src_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
		  route.addr.dst_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}
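
/*
 * Final destruction of the transport, run from a workqueue because it
 * may sleep. Drains the unprocessed read-complete and receive queues,
 * warns about leaked contexts or DMA mappings, then tears down the
 * verbs resources (QP, CQs, MR, PD) and finally the CM ID.
 */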
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);

	/* De-allocate fastreg mr */
	rdma_dealloc_frmr_q(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	schedule_work(&rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are fewer SQ WR available than required to send a
	 * simple response, return false.
	 */
	if (rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3)
		return 0;

	/*
	 * ...or there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

/*
 * Attempt to register the kvec representing the RPC memory with the
 * device.
 *
 * Returns:
 *	0	: the FASTREG WR was successfully posted.
 *	<0	: an error was encountered posting the WR; the errno
 *		  from svc_rdma_send is returned.
 */
int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
		     struct svc_rdma_fastreg_mr *frmr)
{
	struct ib_send_wr fastreg_wr;
	u8 key;

	/* Bump the key */
	key = (u8)(frmr->mr->lkey & 0x000000FF);
	ib_update_fast_reg_key(frmr->mr, ++key);

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof fastreg_wr);
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.send_flags = IB_SEND_SIGNALED;
	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.length = frmr->map_len;
	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
	return svc_rdma_send(xprt, &fastreg_wr);
}
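
/*
 * Post a chain of send WRs on the transport's QP. If the SQ cannot hold
 * the whole chain, the caller sleeps on sc_send_wait until enough SQ
 * slots have been reaped. A transport reference is taken for each WR
 * posted; the references and the SQ accounting are rolled back if
 * ib_post_send fails. Returns -ENOTCONN if the transport is closing on
 * entry, 0 if it closes while waiting for SQ space, otherwise the
 * return value of ib_post_send.
 */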
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return 0;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		atomic_add(wr_count, &xprt->sc_sq_count);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			atomic_sub(wr_count, &xprt->sc_sq_count);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		if (ret)
			wake_up(&xprt->sc_send_wait);
		break;
	}
	return ret;
}
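
/*
 * Build and post an RPC/RDMA error reply of type @err for the request
 * described by @rmsgp. The error is XDR-encoded into a freshly
 * allocated page that is DMA-mapped for the send; on a post failure
 * the mapping and the context are released here.
 */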
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct ib_sge sge;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	u32 *va;
	int length;
	int ret;

	p = svc_rdma_get_page();
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
	/*
	 * Prepare SGE for local address. The page is being sent to
	 * the peer, so map it in the DMA_TO_DEVICE direction.
	 */
	sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
				     page_address(p), PAGE_SIZE,
				     DMA_TO_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
		put_page(p);
		return;
	}
	atomic_inc(&xprt->sc_dma_used);
	sge.lkey = xprt->sc_dma_lkey;
	sge.length = length;

	ctxt = svc_rdma_get_context(xprt);
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = &sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		/* Unmap with the same direction used when mapping above */
		ib_dma_unmap_single(xprt->sc_cm_id->device,
				    sge.addr, PAGE_SIZE,
				    DMA_TO_DEVICE);
		svc_rdma_put_context(ctxt, 1);
	}
}
  1200. }