ib_cm.c

/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "rds.h"
#include "ib.h"
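
/*
 * Human-readable names for IB asynchronous event codes, used by the
 * debug output in the CQ and QP event handlers below.
 */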
static char *rds_ib_event_type_strings[] = {
#define RDS_IB_EVENT_STRING(foo) \
                [IB_EVENT_##foo] = __stringify(IB_EVENT_##foo)
        RDS_IB_EVENT_STRING(CQ_ERR),
        RDS_IB_EVENT_STRING(QP_FATAL),
        RDS_IB_EVENT_STRING(QP_REQ_ERR),
        RDS_IB_EVENT_STRING(QP_ACCESS_ERR),
        RDS_IB_EVENT_STRING(COMM_EST),
        RDS_IB_EVENT_STRING(SQ_DRAINED),
        RDS_IB_EVENT_STRING(PATH_MIG),
        RDS_IB_EVENT_STRING(PATH_MIG_ERR),
        RDS_IB_EVENT_STRING(DEVICE_FATAL),
        RDS_IB_EVENT_STRING(PORT_ACTIVE),
        RDS_IB_EVENT_STRING(PORT_ERR),
        RDS_IB_EVENT_STRING(LID_CHANGE),
        RDS_IB_EVENT_STRING(PKEY_CHANGE),
        RDS_IB_EVENT_STRING(SM_CHANGE),
        RDS_IB_EVENT_STRING(SRQ_ERR),
        RDS_IB_EVENT_STRING(SRQ_LIMIT_REACHED),
        RDS_IB_EVENT_STRING(QP_LAST_WQE_REACHED),
        RDS_IB_EVENT_STRING(CLIENT_REREGISTER),
#undef RDS_IB_EVENT_STRING
};

static char *rds_ib_event_str(enum ib_event_type type)
{
        return rds_str_array(rds_ib_event_type_strings,
                             ARRAY_SIZE(rds_ib_event_type_strings), type);
}

/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
        conn->c_version = version;
}

/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        if (rds_ib_sysctl_flow_control && credits != 0) {
                /* We're doing flow control */
                ic->i_flowctl = 1;
                rds_ib_send_add_credits(conn, credits);
        } else {
                ic->i_flowctl = 0;
        }
}

/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
        int ret;

        attr->min_rnr_timer = IB_RNR_TIMER_000_32;
        ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
        if (ret)
                printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}

/*
 * Connection established.
 * We get here for both outgoing and incoming connections.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
        const struct rds_ib_connect_private *dp = NULL;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_qp_attr qp_attr;
        int err;

        if (event->param.conn.private_data_len >= sizeof(*dp)) {
                dp = event->param.conn.private_data;

                /* make sure it isn't empty data */
                if (dp->dp_protocol_major) {
                        rds_ib_set_protocol(conn,
                                RDS_PROTOCOL(dp->dp_protocol_major,
                                             dp->dp_protocol_minor));
                        rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
                }
        }

        if (conn->c_version < RDS_PROTOCOL(3, 1)) {
                printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
                       " no longer supported\n",
                       &conn->c_faddr,
                       RDS_PROTOCOL_MAJOR(conn->c_version),
                       RDS_PROTOCOL_MINOR(conn->c_version));
                rds_conn_destroy(conn);
                return;
        } else {
                printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
                       &conn->c_faddr,
                       RDS_PROTOCOL_MAJOR(conn->c_version),
                       RDS_PROTOCOL_MINOR(conn->c_version),
                       ic->i_flowctl ? ", flow control" : "");
        }

        /*
         * Init rings and fill recv. This needs to wait until protocol
         * negotiation is complete, since ring layout is different from 3.0 to 3.1.
         */
        rds_ib_send_init_ring(ic);
        rds_ib_recv_init_ring(ic);
        /* Post receive buffers - as a side effect, this will update
         * the posted credit count. */
        rds_ib_recv_refill(conn, 1);

        /* Tune RNR behavior */
        rds_ib_tune_rnr(ic, &qp_attr);

        qp_attr.qp_state = IB_QPS_RTS;
        err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
        if (err)
                printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

        /* update ib_device with this local ipaddr */
        err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
        if (err)
                printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err);

        /* If the peer gave us the last packet it saw, process this as if
         * we had received a regular ACK. */
        if (dp && dp->dp_ack_seq)
                rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

        rds_connect_complete(conn);
}
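
/*
 * Build the rdma_conn_param used for rdma_connect()/rdma_accept(),
 * including the RDS private data (addresses, protocol version and,
 * when flow control is enabled, our initial credit advertisement).
 */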
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
                                      struct rdma_conn_param *conn_param,
                                      struct rds_ib_connect_private *dp,
                                      u32 protocol_version,
                                      u32 max_responder_resources,
                                      u32 max_initiator_depth)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

        memset(conn_param, 0, sizeof(struct rdma_conn_param));

        conn_param->responder_resources =
                min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
        conn_param->initiator_depth =
                min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
        conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
        conn_param->rnr_retry_count = 7;

        if (dp) {
                memset(dp, 0, sizeof(*dp));
                dp->dp_saddr = conn->c_laddr;
                dp->dp_daddr = conn->c_faddr;
                dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
                dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
                dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
                dp->dp_ack_seq = rds_ib_piggyb_ack(ic);

                /* Advertise flow control */
                if (ic->i_flowctl) {
                        unsigned int credits;

                        credits = IB_GET_POST_CREDITS(atomic_read(&ic->i_credits));
                        dp->dp_credit = cpu_to_be32(credits);
                        atomic_sub(IB_SET_POST_CREDITS(credits), &ic->i_credits);
                }

                conn_param->private_data = dp;
                conn_param->private_data_len = sizeof(*dp);
        }
}
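
/*
 * Asynchronous CQ events (e.g. IB_EVENT_CQ_ERR) are only logged here;
 * actual completion processing happens in the send/recv completion
 * handlers passed to ib_create_cq() below.
 */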
static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
        rdsdebug("event %u (%s) data %p\n",
                 event->event, rds_ib_event_str(event->event), data);
}
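
/*
 * Asynchronous QP events: IB_EVENT_COMM_EST is forwarded to the rdma_cm
 * via rdma_notify(); anything else is treated as fatal and drops the
 * connection so it can be re-established.
 */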
static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
        struct rds_connection *conn = data;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
                 rds_ib_event_str(event->event));

        switch (event->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
                break;
        default:
                rdsdebug("Fatal QP Event %u (%s) "
                         "- connection %pI4->%pI4, reconnecting\n",
                         event->event, rds_ib_event_str(event->event),
                         &conn->c_laddr, &conn->c_faddr);
                rds_conn_drop(conn);
                break;
        }
}

/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_device *dev = ic->i_cm_id->device;
        struct ib_qp_init_attr attr;
        struct rds_ib_device *rds_ibdev;
        int ret;

        /*
         * It's normal to see a null device if an incoming connection races
         * with device removal, so we don't print a warning.
         */
        rds_ibdev = rds_ib_get_client_data(dev);
        if (!rds_ibdev)
                return -EOPNOTSUPP;

        /* add the conn now so that connection establishment has the dev */
        rds_ib_add_conn(rds_ibdev, conn);

        if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
                rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
        if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
                rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

        /* Protection domain and memory range */
        ic->i_pd = rds_ibdev->pd;
        ic->i_mr = rds_ibdev->mr;

        ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
                                     rds_ib_cq_event_handler, conn,
                                     ic->i_send_ring.w_nr + 1, 0);
        if (IS_ERR(ic->i_send_cq)) {
                ret = PTR_ERR(ic->i_send_cq);
                ic->i_send_cq = NULL;
                rdsdebug("ib_create_cq send failed: %d\n", ret);
                goto out;
        }

        ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler,
                                     rds_ib_cq_event_handler, conn,
                                     ic->i_recv_ring.w_nr, 0);
        if (IS_ERR(ic->i_recv_cq)) {
                ret = PTR_ERR(ic->i_recv_cq);
                ic->i_recv_cq = NULL;
                rdsdebug("ib_create_cq recv failed: %d\n", ret);
                goto out;
        }

        ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
        if (ret) {
                rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
                goto out;
        }

        ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
        if (ret) {
                rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
                goto out;
        }

        /* XXX negotiate max send/recv with remote? */
        memset(&attr, 0, sizeof(attr));
        attr.event_handler = rds_ib_qp_event_handler;
        attr.qp_context = conn;
        /* + 1 to allow for the single ack message */
        attr.cap.max_send_wr = ic->i_send_ring.w_nr + 1;
        attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
        attr.cap.max_send_sge = rds_ibdev->max_sge;
        attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;
        attr.send_cq = ic->i_send_cq;
        attr.recv_cq = ic->i_recv_cq;

        /*
         * XXX this can fail if max_*_wr is too large? Are we supposed
         * to back off until we get a value that the hardware can support?
         */
        ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
        if (ret) {
                rdsdebug("rdma_create_qp failed: %d\n", ret);
                goto out;
        }

        ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
                                                ic->i_send_ring.w_nr *
                                                        sizeof(struct rds_header),
                                                &ic->i_send_hdrs_dma, GFP_KERNEL);
        if (!ic->i_send_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent send failed\n");
                goto out;
        }

        ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
                                                ic->i_recv_ring.w_nr *
                                                        sizeof(struct rds_header),
                                                &ic->i_recv_hdrs_dma, GFP_KERNEL);
        if (!ic->i_recv_hdrs) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent recv failed\n");
                goto out;
        }

        ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
                                          &ic->i_ack_dma, GFP_KERNEL);
        if (!ic->i_ack) {
                ret = -ENOMEM;
                rdsdebug("ib_dma_alloc_coherent ack failed\n");
                goto out;
        }

        ic->i_sends = vmalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
                                   ibdev_to_node(dev));
        if (!ic->i_sends) {
                ret = -ENOMEM;
                rdsdebug("send allocation failed\n");
                goto out;
        }
        memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));

        ic->i_recvs = vmalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
                                   ibdev_to_node(dev));
        if (!ic->i_recvs) {
                ret = -ENOMEM;
                rdsdebug("recv allocation failed\n");
                goto out;
        }
        memset(ic->i_recvs, 0, ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));

        rds_ib_recv_init_ack(ic);

        rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
                 ic->i_send_cq, ic->i_recv_cq);

out:
        rds_ib_dev_put(rds_ibdev);
        return ret;
}
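
/*
 * Inspect the private data of an incoming connection request and work
 * out which protocol version both sides support. Returns the negotiated
 * version, or 0 if the request should be rejected.
 */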
static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
        const struct rds_ib_connect_private *dp = event->param.conn.private_data;
        u16 common;
        u32 version = 0;

        /*
         * rdma_cm private data is odd - when there is any private data in the
         * request, we will be given a pretty large buffer without telling us the
         * original size. The only way to tell the difference is by looking at
         * the contents, which are initialized to zero.
         * If the protocol version fields aren't set, this is a connection attempt
         * from an older version. This could be 3.0 or 2.0 - we can't tell.
         * We really should have changed this for OFED 1.3 :-(
         */

        /* Be paranoid. RDS always has privdata */
        if (!event->param.conn.private_data_len) {
                printk(KERN_NOTICE "RDS incoming connection has no private data, "
                       "rejecting\n");
                return 0;
        }

        /* Even if len is crap *now* I still want to check it. -ASG */
        if (event->param.conn.private_data_len < sizeof(*dp) ||
            dp->dp_protocol_major == 0)
                return RDS_PROTOCOL_3_0;

        common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
        if (dp->dp_protocol_major == 3 && common) {
                version = RDS_PROTOCOL_3_0;
                while ((common >>= 1) != 0)
                        version++;
        } else if (printk_ratelimit()) {
                printk(KERN_NOTICE "RDS: Connection from %pI4 using "
                       "incompatible protocol version %u.%u\n",
                       &dp->dp_saddr,
                       dp->dp_protocol_major,
                       dp->dp_protocol_minor);
        }
        return version;
}
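
/*
 * Handle an incoming connection request (passive side). The return value
 * tells the rdma_cm whether it may destroy the cm_id; once we have taken
 * ownership of the cm_id (cm_id->context = conn) we must return 0 and
 * tear it down ourselves later.
 */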
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
                             struct rdma_cm_event *event)
{
        __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
        __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
        const struct rds_ib_connect_private *dp = event->param.conn.private_data;
        struct rds_ib_connect_private dp_rep;
        struct rds_connection *conn = NULL;
        struct rds_ib_connection *ic = NULL;
        struct rdma_conn_param conn_param;
        u32 version;
        int err = 1, destroy = 1;

        /* Check whether the remote protocol version matches ours. */
        version = rds_ib_protocol_compatible(event);
        if (!version)
                goto out;

        rdsdebug("saddr %pI4 daddr %pI4 RDSv%u.%u lguid 0x%llx fguid "
                 "0x%llx\n", &dp->dp_saddr, &dp->dp_daddr,
                 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version),
                 (unsigned long long)be64_to_cpu(lguid),
                 (unsigned long long)be64_to_cpu(fguid));

        conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport,
                               GFP_KERNEL);
        if (IS_ERR(conn)) {
                rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
                conn = NULL;
                goto out;
        }

        /*
         * The connection request may occur while the
         * previous connection exists, e.g. in case of failover.
         * But as connections may be initiated simultaneously
         * by both hosts, we have a random backoff mechanism -
         * see the comment above rds_queue_reconnect()
         */
        mutex_lock(&conn->c_cm_lock);
        if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
                if (rds_conn_state(conn) == RDS_CONN_UP) {
                        rdsdebug("incoming connect while connecting\n");
                        rds_conn_drop(conn);
                        rds_ib_stats_inc(s_ib_listen_closed_stale);
                } else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
                        /* Wait and see - our connect may still be succeeding */
                        rds_ib_stats_inc(s_ib_connect_raced);
                }
                goto out;
        }

        ic = conn->c_transport_data;

        rds_ib_set_protocol(conn, version);
        rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));

        /* If the peer gave us the last packet it saw, process this as if
         * we had received a regular ACK. */
        if (dp->dp_ack_seq)
                rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);

        BUG_ON(cm_id->context);
        BUG_ON(ic->i_cm_id);

        ic->i_cm_id = cm_id;
        cm_id->context = conn;

        /* We got halfway through setting up the ib_connection, if we
         * fail now, we have to take the long route out of this mess. */
        destroy = 0;

        err = rds_ib_setup_qp(conn);
        if (err) {
                /* the unlock at "out" releases c_cm_lock for us */
                rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
                goto out;
        }

        rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
                event->param.conn.responder_resources,
                event->param.conn.initiator_depth);

        /* rdma_accept() calls rdma_reject() internally if it fails */
        err = rdma_accept(cm_id, &conn_param);
        if (err)
                rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);

out:
        if (conn)
                mutex_unlock(&conn->c_cm_lock);
        if (err)
                rdma_reject(cm_id, NULL, 0);
        return destroy;
}
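
/*
 * Active-side connection setup: build the QP, then send our connection
 * request (with RDS private data) via rdma_connect().
 */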
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
{
        struct rds_connection *conn = cm_id->context;
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rdma_conn_param conn_param;
        struct rds_ib_connect_private dp;
        int ret;

        /* If the peer doesn't do protocol negotiation, we must
         * default to RDSv3.0 */
        rds_ib_set_protocol(conn, RDS_PROTOCOL_3_0);
        ic->i_flowctl = rds_ib_sysctl_flow_control;     /* advertise flow control */

        ret = rds_ib_setup_qp(conn);
        if (ret) {
                rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
                goto out;
        }

        rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
                                  UINT_MAX, UINT_MAX);
        ret = rdma_connect(cm_id, &conn_param);
        if (ret)
                rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
        /* Beware - returning non-zero tells the rdma_cm to destroy
         * the cm_id. We should certainly not do it as long as we still
         * "own" the cm_id. */
        if (ret) {
                if (ic->i_cm_id == cm_id)
                        ret = 0;
        }
        return ret;
}
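
/*
 * Start an outgoing connection: create the cm_id and kick off address
 * resolution; everything after that is driven from the rdma_cm event
 * handler.
 */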
int rds_ib_conn_connect(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct sockaddr_in src, dest;
        int ret;

        /* XXX I wonder what effect the port space has */
        /* delegate cm event handler to rdma_transport */
        ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
                                     RDMA_PS_TCP);
        if (IS_ERR(ic->i_cm_id)) {
                ret = PTR_ERR(ic->i_cm_id);
                ic->i_cm_id = NULL;
                rdsdebug("rdma_create_id() failed: %d\n", ret);
                goto out;
        }

        rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

        src.sin_family = AF_INET;
        src.sin_addr.s_addr = (__force u32)conn->c_laddr;
        src.sin_port = (__force u16)htons(0);

        dest.sin_family = AF_INET;
        dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
        dest.sin_port = (__force u16)htons(RDS_PORT);

        ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
                                (struct sockaddr *)&dest,
                                RDS_RDMA_RESOLVE_TIMEOUT_MS);
        if (ret) {
                rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
                         ret);
                rdma_destroy_id(ic->i_cm_id);
                ic->i_cm_id = NULL;
        }

out:
        return ret;
}

/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup. In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_shutdown(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        int err = 0;

        rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
                 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
                 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

        if (ic->i_cm_id) {
                struct ib_device *dev = ic->i_cm_id->device;

                rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
                err = rdma_disconnect(ic->i_cm_id);
                if (err) {
                        /* Actually this may happen quite frequently, when
                         * an outgoing connect raced with an incoming connect.
                         */
                        rdsdebug("failed to disconnect, cm: %p err %d\n",
                                 ic->i_cm_id, err);
                }

                /*
                 * We want to wait for tx and rx completion to finish
                 * before we tear down the connection, but we have to be
                 * careful not to get stuck waiting on a send ring that
                 * only has unsignaled sends in it. We've shutdown new
                 * sends before getting here so by waiting for signaled
                 * sends to complete we're ensured that there will be no
                 * more tx processing.
                 */
                wait_event(rds_ib_ring_empty_wait,
                           rds_ib_ring_empty(&ic->i_recv_ring) &&
                           (atomic_read(&ic->i_signaled_sends) == 0));
                tasklet_kill(&ic->i_recv_tasklet);

                if (ic->i_send_hdrs)
                        ib_dma_free_coherent(dev,
                                             ic->i_send_ring.w_nr *
                                                     sizeof(struct rds_header),
                                             ic->i_send_hdrs,
                                             ic->i_send_hdrs_dma);

                if (ic->i_recv_hdrs)
                        ib_dma_free_coherent(dev,
                                             ic->i_recv_ring.w_nr *
                                                     sizeof(struct rds_header),
                                             ic->i_recv_hdrs,
                                             ic->i_recv_hdrs_dma);

                if (ic->i_ack)
                        ib_dma_free_coherent(dev, sizeof(struct rds_header),
                                             ic->i_ack, ic->i_ack_dma);

                if (ic->i_sends)
                        rds_ib_send_clear_ring(ic);
                if (ic->i_recvs)
                        rds_ib_recv_clear_ring(ic);

                if (ic->i_cm_id->qp)
                        rdma_destroy_qp(ic->i_cm_id);
                if (ic->i_send_cq)
                        ib_destroy_cq(ic->i_send_cq);
                if (ic->i_recv_cq)
                        ib_destroy_cq(ic->i_recv_cq);
                rdma_destroy_id(ic->i_cm_id);

                /*
                 * Move connection back to the nodev list.
                 */
                if (ic->rds_ibdev)
                        rds_ib_remove_conn(ic->rds_ibdev, conn);

                ic->i_cm_id = NULL;
                ic->i_pd = NULL;
                ic->i_mr = NULL;
                ic->i_send_cq = NULL;
                ic->i_recv_cq = NULL;
                ic->i_send_hdrs = NULL;
                ic->i_recv_hdrs = NULL;
                ic->i_ack = NULL;
        }
        BUG_ON(ic->rds_ibdev);

        /* Clear pending transmit */
        if (ic->i_data_op) {
                struct rds_message *rm;

                rm = container_of(ic->i_data_op, struct rds_message, data);
                rds_message_put(rm);
                ic->i_data_op = NULL;
        }

        /* Clear the ACK state */
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_set(&ic->i_ack_next, 0);
#else
        ic->i_ack_next = 0;
#endif
        ic->i_ack_recv = 0;

        /* Clear flow control state */
        ic->i_flowctl = 0;
        atomic_set(&ic->i_credits, 0);

        rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
        rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

        if (ic->i_ibinc) {
                rds_inc_put(&ic->i_ibinc->ii_inc);
                ic->i_ibinc = NULL;
        }

        vfree(ic->i_sends);
        ic->i_sends = NULL;
        vfree(ic->i_recvs);
        ic->i_recvs = NULL;
}
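
/*
 * Allocate and initialize the per-connection IB transport data, and put
 * the connection on the nodev list; rds_ib_add_conn() in rds_ib_setup_qp()
 * later attaches it to a device.
 */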
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_ib_connection *ic;
        unsigned long flags;
        int ret;

        /* XXX too lazy? */
        ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL);
        if (!ic)
                return -ENOMEM;

        ret = rds_ib_recv_alloc_caches(ic);
        if (ret) {
                kfree(ic);
                return ret;
        }

        INIT_LIST_HEAD(&ic->ib_node);
        tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
                     (unsigned long) ic);
        mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
        spin_lock_init(&ic->i_ack_lock);
#endif
        atomic_set(&ic->i_signaled_sends, 0);

        /*
         * rds_ib_conn_shutdown() waits for these to be emptied so they
         * must be initialized before it can be called.
         */
        rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
        rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

        ic->conn = conn;
        conn->c_transport_data = ic;

        spin_lock_irqsave(&ib_nodev_conns_lock, flags);
        list_add_tail(&ic->ib_node, &ib_nodev_conns);
        spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

        rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
        return 0;
}

/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
        struct rds_ib_connection *ic = arg;
        spinlock_t *lock_ptr;

        rdsdebug("ic %p\n", ic);

        /*
         * Conn is either on a dev's list or on the nodev list.
         * A race with shutdown() or connect() would cause problems
         * (since rds_ibdev would change) but that should never happen.
         */
        lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

        spin_lock_irq(lock_ptr);
        list_del(&ic->ib_node);
        spin_unlock_irq(lock_ptr);

        rds_ib_recv_free_caches(ic);

        kfree(ic);
}

/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
        va_list ap;

        rds_conn_drop(conn);

        va_start(ap, fmt);
        vprintk(fmt, ap);
        va_end(ap);
}