/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

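/*
 * Each CQ may be shared by up to ISCSI_ISER_MAX_CONN connections, so
 * size the RX/TX CQs for the worst case in which every connection has
 * its full quota of receive/send work requests posted.
 */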
#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		 event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), and DMA Memory Region (DMA MR) with the device associated
 * with the adapter.
 *
 * Returns 0 on success, -1 on failure.
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	int i, j;
	struct iser_cq_desc *cq_desc;

	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
	iser_info("using %d CQs, device %s supports %d vectors\n",
		  device->cqs_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors);

	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
				  GFP_KERNEL);
	if (device->cq_desc == NULL)
		goto cq_desc_err;
	cq_desc = device->cq_desc;

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device   = device;
		cq_desc[i].cq_index = i;

		device->rx_cq[i] = ib_create_cq(device->ib_device,
						iser_cq_callback,
						iser_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->rx_cq[i]))
			goto cq_err;

		device->tx_cq[i] = ib_create_cq(device->ib_device,
						NULL, iser_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->tx_cq[i]))
			goto cq_err;

		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&device->cq_tasklet[i],
			     iser_cq_tasklet_fn,
			     (unsigned long)&cq_desc[i]);
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
				iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (j = 0; j < device->cqs_used; j++)
		tasklet_kill(&device->cq_tasklet[j]);
cq_err:
	for (j = 0; j < i; j++) {
		if (device->tx_cq[j])
			ib_destroy_cq(device->tx_cq[j]);
		if (device->rx_cq[j])
			ib_destroy_cq(device->rx_cq[j]);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->cq_desc);
cq_desc_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;

	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->cqs_used; i++) {
		tasklet_kill(&device->cq_tasklet[i]);
		(void)ib_destroy_cq(device->tx_cq[i]);
		(void)ib_destroy_cq(device->rx_cq[i]);
		device->tx_cq[i] = NULL;
		device->rx_cq[i] = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	kfree(device->cq_desc);

	device->mr = NULL;
	device->pd = NULL;
}

/**
 * iser_create_fmr_pool - Creates FMR pool and page_vector
 *
 * Returns 0 on success, or errno code on failure.
 */
int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
				    (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE+1)),
				    GFP_KERNEL);
	if (!ib_conn->page_vec)
		return ret;

	ib_conn->page_vec->pages = (u64 *)(ib_conn->page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages      */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr_pool))
		return 0;

	/* no FMR => no need for page_vec */
	kfree(ib_conn->page_vec);
	ib_conn->page_vec = NULL;

	ret = PTR_ERR(ib_conn->fmr_pool);
	ib_conn->fmr_pool = NULL;
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
		iser_warn("FMRs are not supported, using unaligned mode\n");
		return 0;
	}
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct iser_conn *ib_conn)
{
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, ib_conn->fmr_pool);

	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	ib_conn->fmr_pool = NULL;

	kfree(ib_conn->page_vec);
	ib_conn->page_vec = NULL;
}

/**
 * iser_create_ib_conn_res - creates the Queue-Pair (QP) for the connection
 *
 * Returns 0 on success, errno code on failure.
 */
static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
{
	struct iser_device *device;
	struct ib_qp_init_attr init_attr;
	int ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->cqs_used; index++)
		if (device->cq_active_qps[index] <
		    device->cq_active_qps[min_index])
			min_index = index;
	device->cq_active_qps[min_index]++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= device->tx_cq[min_index];
	init_attr.recv_cq	= device->rx_cq[min_index];
	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}

/**
 * releases the QP object; always succeeds and returns 0
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
	int cq_index;

	BUG_ON(ib_conn == NULL);

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->qp);

	/* qp is created only once both addr & route are resolved */
	if (ib_conn->qp != NULL) {
		cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
		ib_conn->device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(ib_conn->cma_id);
	}

	ib_conn->qp = NULL;

	return 0;
}

/**
 * based on the resolved device node GUID see if there is already an
 * allocated iser device for this IB device. If there's no such device,
 * create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the IB device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}
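
/*
 * Atomically compare-and-exchange the connection state: if the current
 * state equals @comp, move it to @exch. Returns non-zero when the
 * transition actually took place.
 */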
static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
				     enum iser_ib_conn_state comp,
				     enum iser_ib_conn_state exch)
{
	int ret;

	spin_lock_bh(&ib_conn->lock);
	if ((ret = (ib_conn->state == comp)))
		ib_conn->state = exch;
	spin_unlock_bh(&ib_conn->lock);
	return ret;
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
{
	struct iser_device *device = ib_conn->device;

	BUG_ON(ib_conn->state != ISER_CONN_DOWN);

	mutex_lock(&ig.connlist_mutex);
	list_del(&ib_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);
	iser_free_rx_descriptors(ib_conn);
	iser_free_ib_conn_res(ib_conn);
	ib_conn->device = NULL;
	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
	if (device != NULL)
		iser_device_try_release(device);
	/* if called from a cma handler context, the caller actually
	 * destroys the id */
	if (ib_conn->cma_id != NULL && can_destroy_id) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}
	iscsi_destroy_endpoint(ib_conn->ep);
}

void iser_conn_get(struct iser_conn *ib_conn)
{
	atomic_inc(&ib_conn->refcount);
}

int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id)
{
	if (atomic_dec_and_test(&ib_conn->refcount)) {
		iser_conn_release(ib_conn, can_destroy_id);
		return 1;
	}
	return 0;
}

/**
 * triggers the start of the disconnect procedures and waits for them
 * to be done
 */
void iser_conn_terminate(struct iser_conn *ib_conn)
{
	int err = 0;

	/* change the ib conn state only if the conn is UP, however always call
	 * rdma_disconnect since this is the only way to cause the CMA to change
	 * the QP state to ERROR
	 */
	iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
	err = rdma_disconnect(ib_conn->cma_id);
	if (err)
		iser_err("Failed to disconnect, conn: 0x%p err %d\n",
			 ib_conn, err);

	wait_event_interruptible(ib_conn->wait,
				 ib_conn->state == ISER_CONN_DOWN);

	iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
}
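
/*
 * Move the connection to DOWN, wake anyone waiting on the state change
 * and drop the reference held on behalf of the CMA id.
 */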
static int iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->state = ISER_CONN_DOWN;
	wake_up_interruptible(&ib_conn->wait);
	return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
}
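
/*
 * RDMA_CM_EVENT_ADDR_RESOLVED: bind the connection to an iser device
 * (allocating one on first use of this IB device) and kick off route
 * resolution.
 */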
static int iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn *ib_conn;
	int ret;

	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		return iser_connect_error(cma_id);
	}

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->device = device;

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		return iser_connect_error(cma_id);
	}

	return 0;
}
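
/*
 * RDMA_CM_EVENT_ROUTE_RESOLVED: create the QP and issue the connect
 * request, advertising in the private data header which iSER features
 * (zero-based VA, send-with-invalidate) this initiator does not support.
 */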
static int iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int ret;
	struct iser_cm_hdr req_hdr;

	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 4;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data		= (void *)&req_hdr;
	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return 0;
failure:
	return iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->state = ISER_CONN_UP;
	wake_up_interruptible(&ib_conn->wait);
}
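
/*
 * Handles DISCONNECTED/DEVICE_REMOVAL/ADDR_CHANGE events: fail the
 * iSCSI connection if it was still UP, and finish the teardown once no
 * receive or send buffers remain posted to the QP.
 */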
static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;
	int ret;

	ib_conn = (struct iser_conn *)cma_id->context;

	/* getting here when the state is UP means that the conn is being *
	 * terminated asynchronously from the iSCSI layer's perspective.  */
	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
				      ISER_CONN_TERMINATING))
		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);

	/* Complete the termination process if no posts are pending */
	if (ib_conn->post_recv_buf_count == 0 &&
	    (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}

	ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
	return ret;
}
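
/*
 * Central dispatcher for RDMA CM events; a non-zero return tells the
 * CMA that the id is no longer needed and may be destroyed.
 */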
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	iser_info("event %d status %d conn %p id %p\n",
		  event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		ret = iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ret = iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		ret = iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		ret = iser_disconnected_handler(cma_id);
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	return ret;
}
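
/*
 * One-time initialization of a newly allocated iser connection: set the
 * INIT state, reset the posted-buffer counters and take the initial
 * reference that is dropped when the connection is released.
 */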
void iser_conn_init(struct iser_conn *ib_conn)
{
	ib_conn->state = ISER_CONN_INIT;
	init_waitqueue_head(&ib_conn->wait);
	ib_conn->post_recv_buf_count = 0;
	atomic_set(&ib_conn->post_send_buf_count, 0);
	atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
	INIT_LIST_HEAD(&ib_conn->conn_list);
	spin_lock_init(&ib_conn->lock);
}

/**
 * starts the process of connecting to the target;
 * unless non_blocking is set, sleeps until the connection
 * is established or rejected
 */
int iser_connect(struct iser_conn   *ib_conn,
		 struct sockaddr_in *src_addr,
		 struct sockaddr_in *dst_addr,
		 int                 non_blocking)
{
	struct sockaddr *src, *dst;
	int err = 0;

	sprintf(ib_conn->name, "%pI4:%d",
		&dst_addr->sin_addr.s_addr, dst_addr->sin_port);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_info("connecting to: %pI4, port 0x%x\n",
		  &dst_addr->sin_addr, dst_addr->sin_port);

	ib_conn->state = ISER_CONN_PENDING;

	iser_conn_get(ib_conn); /* ref ib conn's cma id */
	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)ib_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	src = (struct sockaddr *)src_addr;
	dst = (struct sockaddr *)dst_addr;
	err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_event_interruptible(ib_conn->wait,
					 (ib_conn->state != ISER_CONN_PENDING));

		if (ib_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}

	mutex_lock(&ig.connlist_mutex);
	list_add(&ib_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	ib_conn->state = ISER_CONN_DOWN;
	iser_conn_put(ib_conn, 1); /* deref ib conn's cma id */
connect_failure:
	iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */
	return err;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct iser_conn     *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		   io_addr;
	u64		   *page_list;
	int		   status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool,
				   page_list,
				   page_vec->length,
				   io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey	= mem->fmr->lkey;
	mem_reg->rkey	= mem->fmr->rkey;
	mem_reg->len	= page_vec->length * SIZE_4K;
	mem_reg->va	= io_addr;
	mem_reg->is_fmr = 1;
	mem_reg->mem_h	= (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}

/**
 * Unregister (previously registered) memory.
 */
void iser_unreg_mem(struct iser_mem_reg *reg)
{
	int ret;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}
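
/*
 * Post a single receive buffer for the login response, using the
 * device-wide DMA MR for the SGE.
 */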
int iser_post_recvl(struct iser_conn *ib_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = ib_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (unsigned long)ib_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}
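
/*
 * Post @count receive buffers as a single chained work-request list;
 * the ring head (rx_desc_head) is advanced only if the post succeeds.
 */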
int iser_post_recvm(struct iser_conn *ib_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	unsigned int my_rx_head = ib_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &ib_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & ib_conn->qp_max_recv_dtos_mask;
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		ib_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}

/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, errno code on failure
 */
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&ib_conn->post_send_buf_count);

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
		atomic_dec(&ib_conn->post_send_buf_count);
	}
	return ib_ret;
}
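
/*
 * Handle a flush/error completion: free unsolicited DATAOUT descriptors
 * and, once no receive or send buffers remain posted, complete the
 * connection termination.
 */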
static void iser_handle_comp_error(struct iser_tx_desc *desc,
				   struct iser_conn *ib_conn)
{
	if (desc && desc->type == ISCSI_TX_DATAOUT)
		kmem_cache_free(ig.desc_cache, desc);

	if (ib_conn->post_recv_buf_count == 0 &&
	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
		/* getting here when the state is UP means that the conn is *
		 * being terminated asynchronously from the iSCSI layer's   *
		 * perspective.						    */
		if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
					      ISER_CONN_TERMINATING))
			iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

		/* no more non-completed posts to the QP, complete the
		 * termination process without waiting for a disconnect event */
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}
}
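
/*
 * Drain the TX CQ: poll completions one at a time, dispatching
 * successful sends and routing errors to iser_handle_comp_error();
 * returns the number of completions processed.
 */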
static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
{
	struct ib_cq *cq = device->tx_cq[cq_index];
	struct ib_wc wc;
	struct iser_tx_desc *tx_desc;
	struct iser_conn *ib_conn;
	int completed_tx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_SEND)
				iser_snd_completion(tx_desc, ib_conn);
			else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_SEND, wc.opcode);
		} else {
			iser_err("tx id %llx status %d vend_err %x\n",
				 wc.wr_id, wc.status, wc.vendor_err);
			atomic_dec(&ib_conn->post_send_buf_count);
			iser_handle_comp_error(tx_desc, ib_conn);
		}
		completed_tx++;
	}
	return completed_tx;
}
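
/*
 * Per-CQ tasklet: poll the RX CQ to exhaustion, draining the TX CQ
 * every 64 receive completions so sends are not starved, then re-arm
 * the RX CQ and perform a final TX drain.
 */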
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
	struct iser_device  *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;
	struct ib_cq	    *cq = device->rx_cq[cq_index];
	struct ib_wc	     wc;
	struct iser_rx_desc *desc;
	unsigned long	     xfer_len;
	struct iser_conn *ib_conn;
	int completed_tx, completed_rx;

	completed_tx = completed_rx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		BUG_ON(desc == NULL);
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_RECV) {
				xfer_len = (unsigned long)wc.byte_len;
				iser_rcv_completion(desc, xfer_len, ib_conn);
			} else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_RECV, wc.opcode);
		} else {
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				iser_err("rx id %llx status %d vend_err %x\n",
					 wc.wr_id, wc.status, wc.vendor_err);
			ib_conn->post_recv_buf_count--;
			iser_handle_comp_error(NULL, ib_conn);
		}
		completed_rx++;
		if (!(completed_rx & 63))
			completed_tx += iser_drain_tx_cq(device, cq_index);
	}
	/* #warning "it is assumed here that arming CQ only once its empty" *
	 * " would not cause interrupts to be missed"			     */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	completed_tx += iser_drain_tx_cq(device, cq_index);
	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}
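
/*
 * CQ completion interrupt handler: defer all completion processing to
 * the per-CQ tasklet.
 */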
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
	struct iser_device  *device = cq_desc->device;
	int cq_index = cq_desc->cq_index;

	tasklet_schedule(&device->cq_tasklet[cq_index]);
}