/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
 */

#include <linux/file.h>

#include <asm/uaccess.h>

#include "uverbs.h"
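
/*
 * Package up the user-space input and output buffers that follow a
 * command header so they can be passed to the low-level driver as an
 * ib_udata.
 */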
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)		\
	do {							\
		(udata)->inbuf  = (void __user *) (ibuf);	\
		(udata)->outbuf = (void __user *) (obuf);	\
		(udata)->inlen  = (ilen);			\
		(udata)->outlen = (olen);			\
	} while (0)
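
/*
 * Allocate a verbs context for a newly opened uverbs file and create
 * the asynchronous event file descriptor that goes with it.
 */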
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_device *ibdev = file->device->ib_dev;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);

	resp.num_comp_vectors = file->device->num_comp_vectors;

	filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_free;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	up(&file->mutex);

	return in_len;

err_file:
	put_unused_fd(resp.async_fd);
	fput(filp);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	up(&file->mutex);
	return ret;
}
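
/* Query the HCA's device attributes and copy them back to user space. */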
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver = attr.fw_ver;
	resp.node_guid = attr.node_guid;
	resp.sys_image_guid = attr.sys_image_guid;
	resp.max_mr_size = attr.max_mr_size;
	resp.page_size_cap = attr.page_size_cap;
	resp.vendor_id = attr.vendor_id;
	resp.vendor_part_id = attr.vendor_part_id;
	resp.hw_ver = attr.hw_ver;
	resp.max_qp = attr.max_qp;
	resp.max_qp_wr = attr.max_qp_wr;
	resp.device_cap_flags = attr.device_cap_flags;
	resp.max_sge = attr.max_sge;
	resp.max_sge_rd = attr.max_sge_rd;
	resp.max_cq = attr.max_cq;
	resp.max_cqe = attr.max_cqe;
	resp.max_mr = attr.max_mr;
	resp.max_pd = attr.max_pd;
	resp.max_qp_rd_atom = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom = attr.max_ee_rd_atom;
	resp.max_res_rd_atom = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
	resp.atomic_cap = attr.atomic_cap;
	resp.max_ee = attr.max_ee;
	resp.max_rdd = attr.max_rdd;
	resp.max_mw = attr.max_mw;
	resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
	resp.max_mcast_grp = attr.max_mcast_grp;
	resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah = attr.max_ah;
	resp.max_fmr = attr.max_fmr;
	resp.max_map_per_fmr = attr.max_map_per_fmr;
	resp.max_srq = attr.max_srq;
	resp.max_srq_wr = attr.max_srq_wr;
	resp.max_srq_sge = attr.max_srq_sge;
	resp.max_pkeys = attr.max_pkeys;
	resp.local_ca_ack_delay = attr.local_ca_ack_delay;
	resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
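
/* Return the attributes of one HCA port to user space. */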
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
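
/* Allocate a protection domain and return an idr handle for it. */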
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	uobj->context = file->ucontext;

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

retry:
	if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_pd;
	}

	down(&ib_uverbs_idr_mutex);
	ret = idr_get_new(&ib_uverbs_pd_idr, pd, &uobj->id);
	up(&ib_uverbs_idr_mutex);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_pd;

	down(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	up(&file->mutex);

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_list;
	}

	return in_len;

err_list:
	down(&file->mutex);
	list_del(&uobj->list);
	up(&file->mutex);

	down(&ib_uverbs_idr_mutex);
	idr_remove(&ib_uverbs_pd_idr, uobj->id);
	up(&ib_uverbs_idr_mutex);

err_pd:
	ib_dealloc_pd(pd);

err:
	kfree(uobj);
	return ret;
}
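
/* Destroy a protection domain identified by its user-space handle. */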
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_pd *pd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext)
		goto out;

	uobj = pd->uobject;

	ret = ib_dealloc_pd(pd);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);

	down(&file->mutex);
	list_del(&uobj->list);
	up(&file->mutex);

	kfree(uobj);

out:
	up(&ib_uverbs_idr_mutex);
	return ret ? ret : in_len;
}
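
/* Pin a user memory region and register it as an MR on the given PD. */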
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_umem_object *obj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->uobject.context = file->ucontext;

	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set. "Local write" and "remote write"
	 * obviously require write access. "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	ret = ib_umem_get(file->device->ib_dev, &obj->umem,
			  (void *) (unsigned long) cmd.start, cmd.length,
			  !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
	if (ret)
		goto err_free;

	obj->umem.virt_base = cmd.hca_va;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	if (!pd->device->reg_user_mr) {
		ret = -ENOSYS;
		goto err_up;
	}

	mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_up;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = &obj->uobject;

	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

retry:
	if (!idr_pre_get(&ib_uverbs_mr_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_unreg;
	}

	ret = idr_get_new(&ib_uverbs_mr_idr, mr, &obj->uobject.id);
	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_unreg;

	resp.mr_handle = obj->uobject.id;

	down(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
	up(&file->mutex);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_list;
	}

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_list:
	down(&file->mutex);
	list_del(&obj->uobject.list);
	up(&file->mutex);

err_unreg:
	ib_dereg_mr(mr);

err_up:
	up(&ib_uverbs_idr_mutex);

	ib_umem_release(file->device->ib_dev, &obj->umem);

err_free:
	kfree(obj);
	return ret;
}
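
/* Deregister a memory region and release the pinned user pages. */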
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_umem_object *memobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
	if (!mr || mr->uobject->context != file->ucontext)
		goto out;

	memobj = container_of(mr->uobject, struct ib_umem_object, uobject);

	ret = ib_dereg_mr(mr);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);

	down(&file->mutex);
	list_del(&memobj->uobject.list);
	up(&file->mutex);

	ib_umem_release(file->device->ib_dev, &memobj->umem);
	kfree(memobj);

out:
	up(&ib_uverbs_idr_mutex);
	return ret ? ret : in_len;
}
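
/* Create a file descriptor to be used as a completion event channel. */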
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file *filp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}
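
/* Create a completion queue, optionally bound to a completion channel. */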
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata udata;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	if (cmd.comp_channel >= 0)
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context = file->ucontext;
	uobj->comp_events_reported = 0;
	uobj->async_events_reported = 0;
	INIT_LIST_HEAD(&uobj->comp_list);
	INIT_LIST_HEAD(&uobj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err;
	}

	cq->device = file->device->ib_dev;
	cq->uobject = &uobj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file;
	atomic_set(&cq->usecnt, 0);

retry:
	if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_cq;
	}

	down(&ib_uverbs_idr_mutex);
	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);
	up(&ib_uverbs_idr_mutex);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_cq;

	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
	up(&file->mutex);

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = uobj->uobject.id;
	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_list;
	}

	return in_len;

err_list:
	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	down(&ib_uverbs_idr_mutex);
	idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
	up(&ib_uverbs_idr_mutex);

err_cq:
	ib_destroy_cq(cq);

err:
	kfree(uobj);
	return ret;
}
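
/* Destroy a CQ and discard any events still queued for it. */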
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_cq *cq;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event_file *ev_file;
	struct ib_uverbs_event *evt, *tmp;
	u64 user_handle;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	down(&ib_uverbs_idr_mutex);

	cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
	if (!cq || cq->uobject->context != file->ucontext)
		goto out;

	user_handle = cq->uobject->user_handle;
	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
	ev_file = cq->cq_context;

	ret = ib_destroy_cq(cq);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);

	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	if (ev_file) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->lock);
		kref_put(&ev_file->ref, ib_uverbs_release_event_file);
	}

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);

	resp.comp_events_reported = uobj->comp_events_reported;
	resp.async_events_reported = uobj->async_events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);
	return ret ? ret : in_len;
}
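
/* Create a queue pair on the given PD, CQs and (optionally) SRQ. */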
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uevent_object *uobj;
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_qp *qp;
	struct ib_qp_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
	rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
	srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;

	if (!pd || pd->uobject->context != file->ucontext ||
	    !scq || scq->uobject->context != file->ucontext ||
	    !rcq || rcq->uobject->context != file->ucontext ||
	    (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd.qp_type;

	attr.cap.max_send_wr = cmd.max_send_wr;
	attr.cap.max_recv_wr = cmd.max_recv_wr;
	attr.cap.max_send_sge = cmd.max_send_sge;
	attr.cap.max_recv_sge = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context = file->ucontext;
	uobj->events_reported = 0;
	INIT_LIST_HEAD(&uobj->event_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_up;
	}

	qp->device = pd->device;
	qp->pd = pd;
	qp->send_cq = attr.send_cq;
	qp->recv_cq = attr.recv_cq;
	qp->srq = attr.srq;
	qp->uobject = &uobj->uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context = attr.qp_context;
	qp->qp_type = attr.qp_type;

	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;

retry:
	if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);
	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.qp_handle = uobj->uobject.id;

	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
	up(&file->mutex);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_list;
	}

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_list:
	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

err_destroy:
	ib_destroy_qp(qp);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}
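
/* Modify QP attributes (state, path, timeouts, ...) as requested. */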
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl = cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	ret = ib_modify_qp(qp, attr, cmd.attr_mask);
	if (ret)
		goto out;

	ret = in_len;

out:
	up(&ib_uverbs_idr_mutex);
	kfree(attr);

	return ret;
}
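
/* Destroy a QP and discard any asynchronous events still queued for it. */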
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_qp *qp;
	struct ib_uevent_object *uobj;
	struct ib_uverbs_event *evt, *tmp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext)
		goto out;

	uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);

	ret = ib_destroy_qp(qp);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);

	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);

	resp.events_reported = uobj->events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);
	return ret ? ret : in_len;
}
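
/* Attach a QP to a multicast group. */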
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (qp && qp->uobject->context == file->ucontext)
		ret = ib_attach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);

	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
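
/* Detach a QP from a multicast group. */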
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_qp *qp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (qp && qp->uobject->context == file->ucontext)
		ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);

	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
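
/* Create a shared receive queue on the given PD. */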
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	struct ib_uevent_object *uobj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_srq_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.attr.max_wr = cmd.max_wr;
	attr.attr.max_sge = cmd.max_sge;
	attr.attr.srq_limit = cmd.srq_limit;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context = file->ucontext;
	uobj->events_reported = 0;
	INIT_LIST_HEAD(&uobj->event_list);

	srq = pd->device->create_srq(pd, &attr, &udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_up;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->uobject = &uobj->uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;
	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	memset(&resp, 0, sizeof resp);

retry:
	if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);
	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.srq_handle = uobj->uobject.id;

	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
	up(&file->mutex);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_list;
	}

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_list:
	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

err_destroy:
	ib_destroy_srq(srq);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}
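
/* Modify SRQ attributes (max_wr, srq_limit) as requested. */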
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
	if (!srq || srq->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto out;
	}

	attr.max_wr = cmd.max_wr;
	attr.max_sge = cmd.max_sge;
	attr.srq_limit = cmd.srq_limit;

	ret = ib_modify_srq(srq, &attr, cmd.attr_mask);

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
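
/* Destroy an SRQ and discard any events still queued for it. */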
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_srq *srq;
	struct ib_uevent_object *uobj;
	struct ib_uverbs_event *evt, *tmp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	memset(&resp, 0, sizeof resp);

	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
	if (!srq || srq->uobject->context != file->ucontext)
		goto out;

	uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);

	ret = ib_destroy_srq(srq);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);

	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);

	resp.events_reported = uobj->events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);
	return ret ? ret : in_len;
}