/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
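
/*
 * Data model: a ucma_file is allocated per open of the rdma_cm device
 * and owns the contexts created through it plus a queue of events
 * waiting to be read; a ucma_context wraps one rdma_cm_id and is looked
 * up by the integer id handed back to userspace; a ucma_multicast
 * tracks one multicast join on a context.  file->mut guards both
 * ctx_list and event_list.
 */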
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
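
/*
 * The global mutex 'mut' serializes access to ctx_idr and multicast_idr,
 * which map the ids exposed to userspace back to kernel objects.
 * Lookups take a reference on the context; ucma_put_ctx() completes
 * ctx->comp when the last reference drops, which ucma_destroy_id()
 * waits on before freeing the context.
 */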
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}
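
/*
 * rdma_cm event callback, invoked from the CM's context rather than a
 * system call.  It snapshots the event into a ucma_event, queues it on
 * the owning file's event_list, and wakes any poller or reader.
 * Returning nonzero for a connect request tells the rdma_cm to destroy
 * the new id, which is how allocation failure and a full listen
 * backlog are handled.
 */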
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
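
/*
 * GET_EVENT blocks (unless the fd is O_NONBLOCK) until an event is
 * queued.  A connect request is special-cased: a fresh context is
 * allocated for the new rdma_cm_id before the event is reported, and
 * the listener's backlog slot is returned.
 */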
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}
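
/*
 * The RDMA port space chosen at create time implies the QP type: TCP
 * maps to reliable connected (RC), UDP and IPoIB map to unreliable
 * datagram (UD), and the IB port space lets userspace pick.
 */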
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * We cannot hold file->mut when calling rdma_destroy_id() or we can
 * deadlock.  We also acquire file->mut in ucma_event_handler(), and
 * rdma_destroy_id() will wait until all callbacks have completed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}
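
/*
 * Destroy sequence: remove the id from the idr so no new lookups can
 * find it, drop the reference taken at creation, then wait on
 * ctx->comp until any in-flight handlers that still hold a reference
 * finish before tearing down the cm_id and freeing the context.
 */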
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct sockaddr *addr;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	addr = (struct sockaddr *) &cmd.addr;
	if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
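
/*
 * route->num_paths selects how much of the route is reportable: 0 means
 * only addressing information is known (GIDs and pkey are pulled from
 * the device address), 1 reports the primary path record, and 2 adds
 * the alternate path, falling through to also copy the primary.
 */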
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;
	struct net_device *dev;
	u16 vid = 0;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (dev) {
			vid = rdma_vlan_dev_vlan_id(dev);
			dev_put(dev);
		}

		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
				    dev_addr->dst_dev_addr, vid);
		iboe_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(ctx->cm_id->device,
			ctx->cm_id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
			break;
		default:
			break;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
				&resp->path_data[i].path_rec);
	}

	if (copy_to_user(response, resp,
			 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}
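
/*
 * Report the source and destination as AF_IB sockaddrs.  If an address
 * is not already AF_IB it is synthesized from the resolved device
 * address: the pkey and GID come from the bound port, and the service
 * id is derived from the id's port space and port number.
 */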
static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
				   (union ib_gid *) &addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = (void __user *)(unsigned long) cmd.response;
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
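
/*
 * Userspace supplies an array of packed path records; the first entry
 * flagged as a primary, bidirectional GMP path is unpacked and set on
 * the id, and a synthetic ROUTE_RESOLVED event is queued so the caller
 * sees the same completion it would get from rdma_resolve_route().
 */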
static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutex's based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}
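
/*
 * MIGRATE_ID re-homes a context from the file named by cmd.fd to the
 * file on which the command was issued (typically a different event
 * channel opened by the same process).  Both files' mutexes are taken
 * in pointer order via ucma_lock_files() so the context and its
 * pending events move atomically.
 */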
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}
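
/*
 * Dispatch table for the write() ABI, indexed by rdma_ucm_cmd_hdr.cmd.
 * A NULL slot (GET_OPTION is reserved but unimplemented) yields -ENOSYS.
 */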
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind
};
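
/*
 * Every request is a single write(): an rdma_ucm_cmd_hdr giving the
 * command index, the size of the inline payload (hdr.in), and the size
 * of the response buffer pointed to by the payload (hdr.out), followed
 * by the payload itself.  A sketch of a caller, with hypothetical
 * variable names:
 *
 *	struct { struct rdma_ucm_cmd_hdr hdr;
 *		 struct rdma_ucm_create_id cmd; } msg;
 *	struct rdma_ucm_create_id_resp resp;
 *
 *	msg.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
 *	msg.hdr.in  = sizeof(msg.cmd);
 *	msg.hdr.out = sizeof(resp);
 *	msg.cmd.response = (uintptr_t) &resp;
 *	write(fd, &msg, sizeof(msg));	// resp.id holds the new id
 */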
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}
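
/*
 * poll() reports POLLIN whenever the file's event queue is non-empty;
 * waiters are woken from ucma_event_handler() via poll_wait.
 */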
static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;
	return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);