ucma.c

/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");
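
/*
 * Upper bound on the listen backlog accepted from userspace; requests
 * outside (0, max_backlog) are replaced with this value in ucma_listen().
 * Tunable at runtime via the sysctl table below, which registers as
 * /proc/sys/net/rdma_ucm/max_backlog.
 */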
static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static ctl_table ucma_ctl_table[] = {
        {
                .procname       = "max_backlog",
                .data           = &max_backlog,
                .maxlen         = sizeof max_backlog,
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { }
};

static struct ctl_path ucma_ctl_path[] = {
        { .procname = "net" },
        { .procname = "rdma_ucm" },
        { }
};

struct ucma_file {
        struct mutex            mut;
        struct file             *filp;
        struct list_head        ctx_list;
        struct list_head        event_list;
        wait_queue_head_t       poll_wait;
};

struct ucma_context {
        int                     id;
        struct completion       comp;
        atomic_t                ref;
        int                     events_reported;
        int                     backlog;

        struct ucma_file        *file;
        struct rdma_cm_id       *cm_id;
        u64                     uid;

        struct list_head        list;
        struct list_head        mc_list;
};

struct ucma_multicast {
        struct ucma_context     *ctx;
        int                     id;
        int                     events_reported;

        u64                     uid;
        struct list_head        list;
        struct sockaddr_storage addr;
};

struct ucma_event {
        struct ucma_context     *ctx;
        struct ucma_multicast   *mc;
        struct list_head        list;
        struct rdma_cm_id       *cm_id;
        struct rdma_ucm_event_resp resp;
};
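
/*
 * The global "mut" below serializes all lookups and updates of the two
 * IDR tables, i.e. every id-to-structure translation.  Per-file state
 * (ctx_list, event_list) is protected by the owning ucma_file's own mut
 * instead, so event delivery on one fd does not contend with another.
 */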
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
{
        struct ucma_context *ctx;

        ctx = idr_find(&ctx_idr, id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file)
                ctx = ERR_PTR(-EINVAL);
        return ctx;
}
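
/*
 * ucma_get_ctx()/ucma_put_ctx() implement a simple reference count on a
 * context: the last put completes ctx->comp, which ucma_destroy_id()
 * waits on before tearing the context down.  Taking a reference under
 * "mut" guarantees the context cannot be freed while a command runs.
 */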
static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
        struct ucma_context *ctx;

        mutex_lock(&mut);
        ctx = _ucma_find_context(id, file);
        if (!IS_ERR(ctx))
                atomic_inc(&ctx->ref);
        mutex_unlock(&mut);
        return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
        if (atomic_dec_and_test(&ctx->ref))
                complete(&ctx->comp);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
        struct ucma_context *ctx;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        atomic_set(&ctx->ref, 1);
        init_completion(&ctx->comp);
        INIT_LIST_HEAD(&ctx->mc_list);
        ctx->file = file;

        do {
                ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
                if (!ret)
                        goto error;
                mutex_lock(&mut);
                ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
                mutex_unlock(&mut);
        } while (ret == -EAGAIN);

        if (ret)
                goto error;

        list_add_tail(&ctx->list, &file->ctx_list);
        return ctx;

error:
        kfree(ctx);
        return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc;
        int ret;

        mc = kzalloc(sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return NULL;

        do {
                ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
                if (!ret)
                        goto error;
                mutex_lock(&mut);
                ret = idr_get_new(&multicast_idr, mc, &mc->id);
                mutex_unlock(&mut);
        } while (ret == -EAGAIN);

        if (ret)
                goto error;

        mc->ctx = ctx;
        list_add_tail(&mc->list, &ctx->mc_list);
        return mc;

error:
        kfree(mc);
        return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
                                 struct rdma_conn_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
                               struct rdma_ud_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
        dst->qp_num = src->qp_num;
        dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
                                   struct rdma_cm_event *event,
                                   struct ucma_event *uevent)
{
        uevent->ctx = ctx;
        switch (event->event) {
        case RDMA_CM_EVENT_MULTICAST_JOIN:
        case RDMA_CM_EVENT_MULTICAST_ERROR:
                uevent->mc = (struct ucma_multicast *)
                             event->param.ud.private_data;
                uevent->resp.uid = uevent->mc->uid;
                uevent->resp.id = uevent->mc->id;
                break;
        default:
                uevent->resp.uid = ctx->uid;
                uevent->resp.id = ctx->id;
                break;
        }
}
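
/*
 * rdma_cm event callback.  All it does is marshal the event into a
 * ucma_event, queue it on the owning file's event_list, and wake any
 * reader or poller.  Per the rdma_cm callback contract, a nonzero
 * return for a CONNECT_REQUEST tells the rdma_cm to destroy the newly
 * created id: that is used here both when the listen backlog is
 * exhausted and when the uevent allocation itself fails.
 */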
static int ucma_event_handler(struct rdma_cm_id *cm_id,
                              struct rdma_cm_event *event)
{
        struct ucma_event *uevent;
        struct ucma_context *ctx = cm_id->context;
        int ret = 0;

        uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
        if (!uevent)
                return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

        uevent->cm_id = cm_id;
        ucma_set_event_context(ctx, event, uevent);
        uevent->resp.event = event->event;
        uevent->resp.status = event->status;
        if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
                ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
        else
                ucma_copy_conn_event(&uevent->resp.param.conn,
                                     &event->param.conn);

        mutex_lock(&ctx->file->mut);
        if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                if (!ctx->backlog) {
                        ret = -ENOMEM;
                        kfree(uevent);
                        goto out;
                }
                ctx->backlog--;
        } else if (!ctx->uid) {
                /*
                 * We ignore events for new connections until userspace has set
                 * their context.  This can only happen if an error occurs on a
                 * new connection before the user accepts it.  This is okay,
                 * since the accept will just fail later.
                 */
                kfree(uevent);
                goto out;
        }

        list_add_tail(&uevent->list, &ctx->file->event_list);
        wake_up_interruptible(&ctx->file->poll_wait);
out:
        mutex_unlock(&ctx->file->mut);
        return ret;
}
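
/*
 * Report the oldest queued event to userspace, blocking (unless the fd
 * is O_NONBLOCK) until one arrives.  For a CONNECT_REQUEST a fresh
 * context is allocated here so the new id can be handed to userspace;
 * its uid stays zero until userspace accepts the connection, which is
 * why ucma_event_handler() drops events for uid == 0 contexts.
 */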
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct ucma_context *ctx;
        struct rdma_ucm_get_event cmd;
        struct ucma_event *uevent;
        int ret = 0;
        DEFINE_WAIT(wait);

        if (out_len < sizeof uevent->resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        while (list_empty(&file->event_list)) {
                mutex_unlock(&file->mut);

                if (file->filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->poll_wait,
                                             !list_empty(&file->event_list)))
                        return -ERESTARTSYS;

                mutex_lock(&file->mut);
        }

        uevent = list_entry(file->event_list.next, struct ucma_event, list);

        if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                ctx = ucma_alloc_ctx(file);
                if (!ctx) {
                        ret = -ENOMEM;
                        goto done;
                }
                uevent->ctx->backlog++;
                ctx->cm_id = uevent->cm_id;
                ctx->cm_id->context = ctx;
                uevent->resp.id = ctx->id;
        }

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &uevent->resp, sizeof uevent->resp)) {
                ret = -EFAULT;
                goto done;
        }

        list_del(&uevent->list);
        uevent->ctx->events_reported++;
        if (uevent->mc)
                uevent->mc->events_reported++;
        kfree(uevent);
done:
        mutex_unlock(&file->mut);
        return ret;
}

static ssize_t ucma_create_id(struct ucma_file *file,
                              const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_create_id cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        ctx = ucma_alloc_ctx(file);
        mutex_unlock(&file->mut);
        if (!ctx)
                return -ENOMEM;

        ctx->uid = cmd.uid;
        ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
        if (IS_ERR(ctx->cm_id)) {
                ret = PTR_ERR(ctx->cm_id);
                goto err1;
        }

        resp.id = ctx->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err2;
        }
        return 0;

err2:
        rdma_destroy_id(ctx->cm_id);
err1:
        mutex_lock(&mut);
        idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);
        kfree(ctx);
        return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc, *tmp;

        mutex_lock(&mut);
        list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
                list_del(&mc->list);
                idr_remove(&multicast_idr, mc->id);
                kfree(mc);
        }
        mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
                if (uevent->ctx != ctx)
                        continue;

                list_del(&uevent->list);

                /* clear incoming connections. */
                if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
                        rdma_destroy_id(uevent->cm_id);

                kfree(uevent);
        }
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
                if (uevent->mc != mc)
                        continue;

                list_del(&uevent->list);
                kfree(uevent);
        }
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
        int events_reported;

        /* No new events will be generated after destroying the id. */
        rdma_destroy_id(ctx->cm_id);

        ucma_cleanup_multicast(ctx);

        /* Cleanup events not yet reported to the user. */
        mutex_lock(&ctx->file->mut);
        ucma_cleanup_events(ctx);
        list_del(&ctx->list);
        mutex_unlock(&ctx->file->mut);

        events_reported = ctx->events_reported;
        kfree(ctx);
        return events_reported;
}
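
/*
 * Teardown order matters here: the context is first removed from the
 * IDR under "mut", so no new references can be taken; the caller's own
 * reference is then dropped, and we wait on ctx->comp until every
 * remaining holder has called ucma_put_ctx() before ucma_free_ctx()
 * destroys the id and frees the context.
 */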
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_context *ctx;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        ctx = _ucma_find_context(cmd.id, file);
        if (!IS_ERR(ctx))
                idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_put_ctx(ctx);
        wait_for_completion(&ctx->comp);
        resp.events_reported = ucma_free_ctx(ctx);

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_bind_addr cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_resolve_addr cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
                                (struct sockaddr *) &cmd.dst_addr,
                                cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
                                  const char __user *inbuf,
                                  int in_len, int out_len)
{
        struct rdma_ucm_resolve_route cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}
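
/*
 * The three route copiers below fill rdma_ucm_query_route_resp per
 * transport.  num_paths == 0 means only the resolved device address is
 * available, so the GIDs are derived from it; otherwise one or two IB
 * path records are marshalled out (the case fall-through from 2 to 1
 * is intentional).
 */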
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                dev_addr = &route->addr.dev_addr;
                rdma_addr_get_dgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].dgid);
                rdma_addr_get_sgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                /* fall through */
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
                                 struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;
        struct net_device *dev;
        u16 vid = 0;

        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                dev_addr = &route->addr.dev_addr;
                dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
                if (dev) {
                        vid = rdma_vlan_dev_vlan_id(dev);
                        dev_put(dev);
                }

                iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
                                    dev_addr->dst_dev_addr, vid);
                iboe_addr_get_sgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(0xffff);
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                /* fall through */
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        dev_addr = &route->addr.dev_addr;
        rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
        rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        struct rdma_ucm_query_route cmd;
        struct rdma_ucm_query_route_resp resp;
        struct ucma_context *ctx;
        struct sockaddr *addr;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        memset(&resp, 0, sizeof resp);
        addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
        memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
        memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        if (!ctx->cm_id->device)
                goto out;

        resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
        resp.port_num = ctx->cm_id->port_num;
        switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
                switch (rdma_port_get_link_layer(ctx->cm_id->device,
                                                 ctx->cm_id->port_num)) {
                case IB_LINK_LAYER_INFINIBAND:
                        ucma_copy_ib_route(&resp, &ctx->cm_id->route);
                        break;
                case IB_LINK_LAYER_ETHERNET:
                        ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
                        break;
                default:
                        break;
                }
                break;
        case RDMA_TRANSPORT_IWARP:
                ucma_copy_iw_route(&resp, &ctx->cm_id->route);
                break;
        default:
                break;
        }

out:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
                                 struct rdma_ucm_conn_param *src)
{
        dst->private_data = src->private_data;
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
                            int in_len, int out_len)
{
        struct rdma_ucm_connect cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (!cmd.conn_param.valid)
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_copy_conn_param(&conn_param, &cmd.conn_param);
        ret = rdma_connect(ctx->cm_id, &conn_param);
        ucma_put_ctx(ctx);
        return ret;
}
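
/*
 * Backlog values outside (0, max_backlog) are replaced with the
 * max_backlog sysctl value before being passed to rdma_listen(); the
 * remaining backlog is decremented per CONNECT_REQUEST in
 * ucma_event_handler() and credited back in ucma_get_event().
 */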
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_listen cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
                       cmd.backlog : max_backlog;
        ret = rdma_listen(ctx->cm_id, ctx->backlog);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_accept cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (cmd.conn_param.valid) {
                ctx->uid = cmd.uid;
                ucma_copy_conn_param(&conn_param, &cmd.conn_param);
                ret = rdma_accept(ctx->cm_id, &conn_param);
        } else
                ret = rdma_accept(ctx->cm_id, NULL);

        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_reject cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_disconnect cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_disconnect(ctx->cm_id);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_init_qp_attr cmd;
        struct ib_uverbs_qp_attr resp;
        struct ucma_context *ctx;
        struct ib_qp_attr qp_attr;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        resp.qp_attr_mask = 0;
        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.qp_state = cmd.qp_state;
        ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
        if (ret)
                goto out;

        ib_copy_qp_attr_to_user(&resp, &qp_attr);
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

out:
        ucma_put_ctx(ctx);
        return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
                              void *optval, size_t optlen)
{
        int ret = 0;

        switch (optname) {
        case RDMA_OPTION_ID_TOS:
                if (optlen != sizeof(u8)) {
                        ret = -EINVAL;
                        break;
                }
                rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}
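
/*
 * RDMA_OPTION_IB_PATH: scan the user-supplied array of packed path
 * records for a usable primary, bidirectional GMP path, install it on
 * the id via rdma_set_ib_paths(), and synthesize a ROUTE_RESOLVED
 * event so userspace observes the same sequence as a normal route
 * resolution.
 */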
static int ucma_set_ib_path(struct ucma_context *ctx,
                            struct ib_path_rec_data *path_data, size_t optlen)
{
        struct ib_sa_path_rec sa_path;
        struct rdma_cm_event event;
        int ret;

        if (optlen % sizeof(*path_data))
                return -EINVAL;

        for (; optlen; optlen -= sizeof(*path_data), path_data++) {
                if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
                                         IB_PATH_BIDIRECTIONAL))
                        break;
        }

        if (!optlen)
                return -EINVAL;

        ib_sa_unpack_path(path_data->path_rec, &sa_path);
        ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
        if (ret)
                return ret;

        memset(&event, 0, sizeof event);
        event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
                              void *optval, size_t optlen)
{
        int ret;

        switch (optname) {
        case RDMA_OPTION_IB_PATH:
                ret = ucma_set_ib_path(ctx, optval, optlen);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
                                 int optname, void *optval, size_t optlen)
{
        int ret;

        switch (level) {
        case RDMA_OPTION_ID:
                ret = ucma_set_option_id(ctx, optname, optval, optlen);
                break;
        case RDMA_OPTION_IB:
                ret = ucma_set_option_ib(ctx, optname, optval, optlen);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_set_option cmd;
        struct ucma_context *ctx;
        void *optval;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        /* cmd.optlen is user controlled; bound it before allocating. */
        if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) {
                ret = -EINVAL;
                goto out1;
        }

        optval = kmalloc(cmd.optlen, GFP_KERNEL);
        if (!optval) {
                ret = -ENOMEM;
                goto out1;
        }

        if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
                           cmd.optlen)) {
                ret = -EFAULT;
                goto out2;
        }

        ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
                                    cmd.optlen);

out2:
        kfree(optval);
out1:
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_notify cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len)
{
        struct rdma_ucm_join_mcast cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        struct ucma_multicast *mc;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&file->mut);
        mc = ucma_alloc_multicast(ctx);
        if (!mc) {
                ret = -ENOMEM;
                goto err1;
        }

        mc->uid = cmd.uid;
        memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
        ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
        if (ret)
                goto err2;

        resp.id = mc->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err3;
        }

        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return 0;

err3:
        rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
        ucma_cleanup_mc_events(mc);
err2:
        mutex_lock(&mut);
        idr_remove(&multicast_idr, mc->id);
        mutex_unlock(&mut);
        list_del(&mc->list);
        kfree(mc);
err1:
        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
                                    const char __user *inbuf,
                                    int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_multicast *mc;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        mc = idr_find(&multicast_idr, cmd.id);
        if (!mc)
                mc = ERR_PTR(-ENOENT);
        else if (mc->ctx->file != file)
                mc = ERR_PTR(-EINVAL);
        else {
                idr_remove(&multicast_idr, mc->id);
                atomic_inc(&mc->ctx->ref);
        }
        mutex_unlock(&mut);

        if (IS_ERR(mc)) {
                ret = PTR_ERR(mc);
                goto out;
        }

        rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
        mutex_lock(&mc->ctx->file->mut);
        ucma_cleanup_mc_events(mc);
        list_del(&mc->list);
        mutex_unlock(&mc->ctx->file->mut);

        ucma_put_ctx(mc->ctx);
        resp.events_reported = mc->events_reported;
        kfree(mc);

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;
out:
        return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
        /* Acquire the mutexes in a fixed (pointer) order to prevent deadlock. */
        if (file1 < file2) {
                mutex_lock(&file1->mut);
                mutex_lock(&file2->mut);
        } else {
                mutex_lock(&file2->mut);
                mutex_lock(&file1->mut);
        }
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
        if (file1 < file2) {
                mutex_unlock(&file2->mut);
                mutex_unlock(&file1->mut);
        } else {
                mutex_unlock(&file1->mut);
                mutex_unlock(&file2->mut);
        }
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
                if (uevent->ctx == ctx)
                        list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_migrate_id cmd;
        struct rdma_ucm_migrate_resp resp;
        struct ucma_context *ctx;
        struct file *filp;
        struct ucma_file *cur_file;
        int ret = 0;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        /* Get current fd to protect against it being closed */
        filp = fget(cmd.fd);
        if (!filp)
                return -ENOENT;

        /* Validate current fd and prevent destruction of id. */
        ctx = ucma_get_ctx(filp->private_data, cmd.id);
        if (IS_ERR(ctx)) {
                ret = PTR_ERR(ctx);
                goto file_put;
        }

        cur_file = ctx->file;
        if (cur_file == new_file) {
                resp.events_reported = ctx->events_reported;
                goto response;
        }

        /*
         * Migrate events between fd's, maintaining order, and avoiding new
         * events being added before existing events.
         */
        ucma_lock_files(cur_file, new_file);
        mutex_lock(&mut);

        list_move_tail(&ctx->list, &new_file->ctx_list);
        ucma_move_events(ctx, new_file);
        ctx->file = new_file;
        resp.events_reported = ctx->events_reported;

        mutex_unlock(&mut);
        ucma_unlock_files(cur_file, new_file);

response:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
file_put:
        fput(filp);
        return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len) = {
        [RDMA_USER_CM_CMD_CREATE_ID]     = ucma_create_id,
        [RDMA_USER_CM_CMD_DESTROY_ID]    = ucma_destroy_id,
        [RDMA_USER_CM_CMD_BIND_ADDR]     = ucma_bind_addr,
        [RDMA_USER_CM_CMD_RESOLVE_ADDR]  = ucma_resolve_addr,
        [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
        [RDMA_USER_CM_CMD_QUERY_ROUTE]   = ucma_query_route,
        [RDMA_USER_CM_CMD_CONNECT]       = ucma_connect,
        [RDMA_USER_CM_CMD_LISTEN]        = ucma_listen,
        [RDMA_USER_CM_CMD_ACCEPT]        = ucma_accept,
        [RDMA_USER_CM_CMD_REJECT]        = ucma_reject,
        [RDMA_USER_CM_CMD_DISCONNECT]    = ucma_disconnect,
        [RDMA_USER_CM_CMD_INIT_QP_ATTR]  = ucma_init_qp_attr,
        [RDMA_USER_CM_CMD_GET_EVENT]     = ucma_get_event,
        [RDMA_USER_CM_CMD_GET_OPTION]    = NULL,
        [RDMA_USER_CM_CMD_SET_OPTION]    = ucma_set_option,
        [RDMA_USER_CM_CMD_NOTIFY]        = ucma_notify,
        [RDMA_USER_CM_CMD_JOIN_MCAST]    = ucma_join_multicast,
        [RDMA_USER_CM_CMD_LEAVE_MCAST]   = ucma_leave_multicast,
        [RDMA_USER_CM_CMD_MIGRATE_ID]    = ucma_migrate_id
};
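
/*
 * Each write() carries exactly one command: a struct rdma_ucm_cmd_hdr
 * (command index plus in/out payload sizes) immediately followed by the
 * command-specific structure.  Responses go to the user buffer whose
 * address the command itself supplies.  As a rough sketch, a userspace
 * caller (librdmacm does this internally) would issue:
 *
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} msg;
 *	// fill msg.hdr.cmd/in/out, point msg.cmd.response at a
 *	// struct rdma_ucm_create_id_resp, then:
 *	write(fd, &msg, sizeof(msg));
 */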
static ssize_t ucma_write(struct file *filp, const char __user *buf,
                          size_t len, loff_t *pos)
{
        struct ucma_file *file = filp->private_data;
        struct rdma_ucm_cmd_hdr hdr;
        ssize_t ret;

        if (len < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
                return -EINVAL;

        if (hdr.in + sizeof(hdr) > len)
                return -EINVAL;

        if (!ucma_cmd_table[hdr.cmd])
                return -ENOSYS;

        ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
        if (!ret)
                ret = len;

        return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct ucma_file *file = filp->private_data;
        unsigned int mask = 0;

        poll_wait(filp, &file->poll_wait, wait);

        if (!list_empty(&file->event_list))
                mask = POLLIN | POLLRDNORM;

        return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
        struct ucma_file *file;

        file = kmalloc(sizeof *file, GFP_KERNEL);
        if (!file)
                return -ENOMEM;

        INIT_LIST_HEAD(&file->event_list);
        INIT_LIST_HEAD(&file->ctx_list);
        init_waitqueue_head(&file->poll_wait);
        mutex_init(&file->mut);

        filp->private_data = file;
        file->filp = filp;
        return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
        struct ucma_file *file = filp->private_data;
        struct ucma_context *ctx, *tmp;

        mutex_lock(&file->mut);
        list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
                mutex_unlock(&file->mut);

                mutex_lock(&mut);
                idr_remove(&ctx_idr, ctx->id);
                mutex_unlock(&mut);

                ucma_free_ctx(ctx);
                mutex_lock(&file->mut);
        }
        mutex_unlock(&file->mut);
        kfree(file);
        return 0;
}

static const struct file_operations ucma_fops = {
        .owner   = THIS_MODULE,
        .open    = ucma_open,
        .release = ucma_close,
        .write   = ucma_write,
        .poll    = ucma_poll,
        .llseek  = no_llseek,
};

static struct miscdevice ucma_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "rdma_cm",
        .fops  = &ucma_fops,
};
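
/*
 * Registering as a misc device exposes the interface as a character
 * device named "rdma_cm" (typically placed at /dev/infiniband/rdma_cm
 * by distribution udev rules); librdmacm is the usual consumer.
 */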
static ssize_t show_abi_version(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
        int ret;

        ret = misc_register(&ucma_misc);
        if (ret)
                return ret;

        ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
        if (ret) {
                printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
                goto err1;
        }

        ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
        if (!ucma_ctl_table_hdr) {
                printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
                ret = -ENOMEM;
                goto err2;
        }
        return 0;
err2:
        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
        misc_deregister(&ucma_misc);
        return ret;
}

static void __exit ucma_cleanup(void)
{
        unregister_sysctl_table(ucma_ctl_table_hdr);
        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
        misc_deregister(&ucma_misc);
        idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);