/*
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>	/* MODULE_AUTHOR() and friends */
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	UCMA_MAX_BACKLOG	= 128
};
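
/*
 * One ucma_file exists per open instance of the device.  Its mutex
 * protects the per-file context and pending-event lists; poll_wait is
 * woken whenever a new event is queued for this file.
 */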
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr		addr;
	u8			pad[sizeof(struct sockaddr_in6) -
				    sizeof(struct sockaddr)];
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};
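
/*
 * The global mutex below serializes access to the two IDRs, which map
 * user-visible ids to contexts and multicast joins across all open
 * files.  Per-file state is protected by ucma_file.mut instead; where
 * both are needed, file->mut is acquired first.
 */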
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
						       struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}
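
/*
 * Contexts are reference counted: ucma_get_ctx() takes a reference and
 * ucma_put_ctx() drops it, completing ctx->comp when the last reference
 * goes away so that ucma_destroy_id() can wait for outstanding users.
 */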
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;
		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}
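
/*
 * Note: like ucma_alloc_ctx(), this returns NULL (not an ERR_PTR) on
 * failure; callers must check for NULL.
 */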
static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	do {
		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
		if (!ret)
			goto error;
		mutex_lock(&mut);
		ret = idr_get_new(&multicast_idr, mc, &mc->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}
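
/*
 * Callback invoked by the rdma_cm on every CM event.  It translates the
 * event into a ucma_event, queues it on the owning file's event list,
 * and wakes any poller.  Connection requests consume listen backlog;
 * returning non-zero tells the rdma_cm to destroy the new cm_id.
 */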
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
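
/*
 * Hand the oldest queued event to userspace, blocking (unless O_NONBLOCK)
 * while the queue is empty.  A connection request is given a context of
 * its own here, and the consumed backlog slot is returned to the listener.
 */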
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static ssize_t ucma_create_id(struct ucma_file *file,
			      const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	/* ucma_alloc_ctx() linked the context into file->ctx_list */
	mutex_lock(&file->mut);
	list_del(&ctx->list);
	mutex_unlock(&file->mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		list_del(&uevent->list);

		/* clear incoming connections. */
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);

		kfree(uevent);
	}
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}
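
/*
 * Destroy sequence: remove the id from the IDR so no new lookups can
 * find it, drop our reference, wait for all other references to drain,
 * then free the context and everything hanging off it.
 */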
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
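
/*
 * num_paths == 0 means the route has not been resolved yet, so only the
 * cached GIDs and pkey from the device address are returned; one path
 * record is the common case, and a second entry is copied when an
 * alternate path record is present.
 */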
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		ib_addr_get_dgid(dev_addr,
				 (union ib_gid *) &resp->ib_route[0].dgid);
		ib_addr_get_sgid(dev_addr,
				 (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
		       cmd.backlog : UCMA_MAX_BACKLOG;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ctx->uid = cmd.uid;
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		ret = rdma_accept(ctx->cm_id, &conn_param);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}
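
/*
 * Report the QP attributes (and attribute mask) that userspace needs to
 * transition its QP through the states required for the current
 * connection, mirroring what rdma_init_qp_attr() computes for kernel QPs.
 */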
static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}
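
/*
 * Join a multicast group on behalf of userspace.  The ucma_multicast is
 * passed to rdma_join_multicast() as the private context, which is how
 * MULTICAST_JOIN/MULTICAST_ERROR events are matched back up in
 * ucma_set_event_context().
 */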
static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	/* ucma_alloc_multicast() returns NULL on failure, not an ERR_PTR */
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}
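
/*
 * write() dispatch: each request begins with an rdma_ucm_cmd_hdr naming
 * the command and its payload sizes; the command index selects a handler
 * from this table.
 */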
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;
	return 0;
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		/*
		 * ucma_free_ctx() takes file->mut itself, so drop it around
		 * the call and reacquire it to continue the walk.
		 */
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
};

static struct miscdevice ucma_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "rdma_cm",
	.fops	= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err;
	}
	return 0;
err:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	/* release both id tables on module unload */
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);