/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_path ucma_ctl_path[] = {
	{ .procname = "net" },
	{ .procname = "rdma_ucm" },
	{ }
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
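
/*
 * The global 'mut' protects ctx_idr and multicast_idr.  Each open file
 * has its own 'mut' protecting that file's ctx_list and event_list.
 * A context stays alive while 'ref' is held; the final ucma_put_ctx()
 * fires 'comp', which ucma_destroy_id() waits on before freeing.
 */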
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}
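
/*
 * Allocate a context and publish it in ctx_idr.  This uses the
 * two-step IDR API of this kernel: idr_pre_get() preallocates memory,
 * and idr_get_new() is retried while it returns -EAGAIN (a concurrent
 * caller may have consumed the preallocation).
 */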
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	do {
		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&multicast_idr, mc, &mc->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}
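
/*
 * rdma_cm event callback.  Events are queued on the owning file's
 * event_list for ucma_get_event() to report.  A connection request
 * consumes one slot of the listen backlog; when the backlog is
 * exhausted, or the uevent allocation fails for a connection request,
 * the nonzero return tells the rdma_cm core to destroy the new id.
 */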
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
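
/*
 * Report the next queued event to userspace, blocking unless the file
 * was opened with O_NONBLOCK.  A CONNECT_REQUEST event allocates the
 * context for the incoming id here, which also releases the listen
 * backlog slot taken in ucma_event_handler().
 */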
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		list_del(&uevent->list);

		/* clear incoming connections. */
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);

		kfree(uevent);
	}
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}
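
/*
 * Tear down an id at userspace's request.  The context is removed from
 * ctx_idr first so no new references can be taken, the caller's
 * reference is dropped, and we wait on 'comp' for any outstanding
 * references before freeing.  The count of events already reported is
 * returned so userspace can release its own event bookkeeping.
 */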
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
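
/*
 * num_paths selects what the route copy helpers report: with no
 * resolved path records (num_paths == 0) only the GIDs and pkey
 * derived from the bound device address are returned; 1 or 2 copy the
 * primary (and optional alternate) path records to userspace.
 */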
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;
	struct net_device *dev;
	u16 vid = 0;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (dev) {
			vid = rdma_vlan_dev_vlan_id(dev);
			dev_put(dev);
		}

		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
				    dev_addr->dst_dev_addr, vid);
		iboe_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(ctx->cm_id->device,
			ctx->cm_id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
			break;
		default:
			break;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
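
/*
 * Install user-supplied path records.  Only the first entry flagged as
 * a primary, bidirectional GMP path is used; on success a synthetic
 * ROUTE_RESOLVED event is queued so userspace observes the same
 * sequence as after rdma_resolve_route().
 */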
static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = kmalloc(cmd.optlen, GFP_KERNEL);
	if (!optval) {
		ret = -ENOMEM;
		goto out1;
	}

	if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
			   cmd.optlen)) {
		ret = -EFAULT;
		goto out2;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
out2:
	kfree(optval);
out1:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}
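
/*
 * Join a multicast group on the user's behalf.  file->mut is held
 * across the join and the copy_to_user() so the join event cannot be
 * reported before the response carrying the new multicast id reaches
 * userspace.
 */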
static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}
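
/*
 * Move an id, and any of its events still queued, from the file that
 * currently owns it to the file on which this command was issued.
 * Both files' mutexes are taken in pointer order via ucma_lock_files()
 * to keep event ordering intact.
 */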
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct file *filp;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	filp = fget(cmd.fd);
	if (!filp)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(filp->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fput(filp);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
};
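
/*
 * write() entry point for the command ABI: userspace writes a struct
 * rdma_ucm_cmd_hdr followed by hdr.in bytes of command payload;
 * responses are copied out through the command's 'response' pointer.
 * A minimal userspace sketch (illustrative only, assuming the layout
 * in <rdma/rdma_user_cm.h>):
 *
 *	int fd = open("/dev/infiniband/rdma_cm", O_RDWR);
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} msg = {
 *		.hdr = { .cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *			 .in  = sizeof(msg.cmd),
 *			 .out = sizeof(struct rdma_ucm_create_id_resp) },
 *	};
 *	... fill msg.cmd, then: write(fd, &msg, sizeof(msg));
 */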
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;
	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);