/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
 */

#include <linux/file.h>

#include <asm/uaccess.h>

#include "uverbs.h"
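
/*
 * INIT_UDATA() points an ib_udata at the portions of the command and
 * response buffers that follow the fixed-size part of each command, so
 * that device-specific data can be passed through to the low-level
 * driver.
 */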
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)		\
	do {							\
		(udata)->inbuf  = (void __user *) (ibuf);	\
		(udata)->outbuf = (void __user *) (obuf);	\
		(udata)->inlen  = (ilen);			\
		(udata)->outlen = (olen);			\
	} while (0)
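
/*
 * Allocate a verbs context for this file, set up its asynchronous event
 * file descriptor, and register an IB event handler for the device.
 * Each ib_uverbs_file may own at most one context.
 */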
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_device *ibdev = file->device->ib_dev;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);

	resp.num_comp_vectors = file->device->num_comp_vectors;

	filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_free;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	up(&file->mutex);

	return in_len;

err_file:
	put_unused_fd(resp.async_fd);
	fput(filp);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	up(&file->mutex);
	return ret;
}
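
/*
 * Query the device's attributes and copy them back to userspace.
 */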
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver = attr.fw_ver;
	resp.node_guid = attr.node_guid;
	resp.sys_image_guid = attr.sys_image_guid;
	resp.max_mr_size = attr.max_mr_size;
	resp.page_size_cap = attr.page_size_cap;
	resp.vendor_id = attr.vendor_id;
	resp.vendor_part_id = attr.vendor_part_id;
	resp.hw_ver = attr.hw_ver;
	resp.max_qp = attr.max_qp;
	resp.max_qp_wr = attr.max_qp_wr;
	resp.device_cap_flags = attr.device_cap_flags;
	resp.max_sge = attr.max_sge;
	resp.max_sge_rd = attr.max_sge_rd;
	resp.max_cq = attr.max_cq;
	resp.max_cqe = attr.max_cqe;
	resp.max_mr = attr.max_mr;
	resp.max_pd = attr.max_pd;
	resp.max_qp_rd_atom = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom = attr.max_ee_rd_atom;
	resp.max_res_rd_atom = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
	resp.atomic_cap = attr.atomic_cap;
	resp.max_ee = attr.max_ee;
	resp.max_rdd = attr.max_rdd;
	resp.max_mw = attr.max_mw;
	resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
	resp.max_mcast_grp = attr.max_mcast_grp;
	resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah = attr.max_ah;
	resp.max_fmr = attr.max_fmr;
	resp.max_map_per_fmr = attr.max_map_per_fmr;
	resp.max_srq = attr.max_srq;
	resp.max_srq_wr = attr.max_srq_wr;
	resp.max_srq_sge = attr.max_srq_sge;
	resp.max_pkeys = attr.max_pkeys;
	resp.local_ca_ack_delay = attr.local_ca_ack_delay;
	resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
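
/*
 * Query the attributes of a single port and copy them back to userspace.
 */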
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state = attr.state;
	resp.max_mtu = attr.max_mtu;
	resp.active_mtu = attr.active_mtu;
	resp.gid_tbl_len = attr.gid_tbl_len;
	resp.port_cap_flags = attr.port_cap_flags;
	resp.max_msg_sz = attr.max_msg_sz;
	resp.bad_pkey_cntr = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr = attr.qkey_viol_cntr;
	resp.pkey_tbl_len = attr.pkey_tbl_len;
	resp.lid = attr.lid;
	resp.sm_lid = attr.sm_lid;
	resp.lmc = attr.lmc;
	resp.max_vl_num = attr.max_vl_num;
	resp.sm_sl = attr.sm_sl;
	resp.subnet_timeout = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width = attr.active_width;
	resp.active_speed = attr.active_speed;
	resp.phys_state = attr.phys_state;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
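
/*
 * Allocate a protection domain, assign it a handle in the PD idr, and
 * add it to the context's list of PDs.
 */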
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	uobj->context = file->ucontext;

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	down(&ib_uverbs_idr_mutex);

retry:
	if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_up;
	}

	ret = idr_get_new(&ib_uverbs_pd_idr, pd, &uobj->id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_up;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_pd_idr, uobj->id);

err_up:
	up(&ib_uverbs_idr_mutex);
	ib_dealloc_pd(pd);

err:
	kfree(uobj);
	return ret;
}
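
/*
 * Look up a PD by handle, deallocate it, and remove it from the idr and
 * from the context's PD list.
 */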
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_pd *pd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext)
		goto out;

	uobj = pd->uobject;

	ret = ib_dealloc_pd(pd);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);

	down(&file->mutex);
	list_del(&uobj->list);
	up(&file->mutex);

	kfree(uobj);

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
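
/*
 * Pin the userspace region described by the command, register it as a
 * memory region with the low-level driver, and return the new MR handle
 * along with its lkey and rkey.
 */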
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_umem_object *obj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->uobject.context = file->ucontext;

	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	ret = ib_umem_get(file->device->ib_dev, &obj->umem,
			  (void *) (unsigned long) cmd.start, cmd.length,
			  !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
	if (ret)
		goto err_free;

	obj->umem.virt_base = cmd.hca_va;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	if (!pd->device->reg_user_mr) {
		ret = -ENOSYS;
		goto err_up;
	}

	mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_up;
	}

	mr->device = pd->device;
	mr->pd = pd;
	mr->uobject = &obj->uobject;

	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

retry:
	if (!idr_pre_get(&ib_uverbs_mr_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_unreg;
	}

	ret = idr_get_new(&ib_uverbs_mr_idr, mr, &obj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_unreg;

	resp.mr_handle = obj->uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_mr_idr, obj->uobject.id);

err_unreg:
	ib_dereg_mr(mr);

err_up:
	up(&ib_uverbs_idr_mutex);

	ib_umem_release(file->device->ib_dev, &obj->umem);

err_free:
	kfree(obj);
	return ret;
}
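
/*
 * Deregister a memory region and release the pinned userspace memory
 * backing it.
 */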
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_umem_object *memobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
	if (!mr || mr->uobject->context != file->ucontext)
		goto out;

	memobj = container_of(mr->uobject, struct ib_umem_object, uobject);

	ret = ib_dereg_mr(mr);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);

	down(&file->mutex);
	list_del(&memobj->uobject.list);
	up(&file->mutex);

	ib_umem_release(file->device->ib_dev, &memobj->umem);

	kfree(memobj);

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
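
/*
 * Create a completion event channel: a file descriptor on which
 * completion events for the CQs bound to it are delivered.
 */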
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file *filp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);

	return in_len;
}
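
/*
 * Create a completion queue, optionally bound to a completion channel,
 * and return its handle and actual size to userspace.
 */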
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata udata;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event_file *ev_file = NULL;
	struct ib_cq *cq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	if (cmd.comp_channel >= 0)
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context = file->ucontext;
	uobj->comp_events_reported = 0;
	uobj->async_events_reported = 0;
	INIT_LIST_HEAD(&uobj->comp_list);
	INIT_LIST_HEAD(&uobj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err;
	}

	cq->device = file->device->ib_dev;
	cq->uobject = &uobj->uobject;
	cq->comp_handler = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context = ev_file;
	atomic_set(&cq->usecnt, 0);

	down(&ib_uverbs_idr_mutex);

retry:
	if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_up;
	}

	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_up;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = uobj->uobject.id;
	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);

err_up:
	up(&ib_uverbs_idr_mutex);
	ib_destroy_cq(cq);

err:
	kfree(uobj);
	return ret;
}
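
/*
 * Destroy a CQ, free any events still queued for it, and report how
 * many completion and asynchronous events were delivered for it.
 */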
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_cq *cq;
	struct ib_ucq_object *uobj;
	struct ib_uverbs_event_file *ev_file;
	struct ib_uverbs_event *evt, *tmp;
	u64 user_handle;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	down(&ib_uverbs_idr_mutex);

	cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
	if (!cq || cq->uobject->context != file->ucontext)
		goto out;

	user_handle = cq->uobject->user_handle;
	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
	ev_file = cq->cq_context;

	ret = ib_destroy_cq(cq);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);

	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	if (ev_file) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->lock);

		kref_put(&ev_file->ref, ib_uverbs_release_event_file);
	}

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);

	resp.comp_events_reported = uobj->comp_events_reported;
	resp.async_events_reported = uobj->async_events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
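
/*
 * Create a queue pair on the given PD, attached to the given send and
 * receive CQs (and optionally an SRQ), and return its handle and QP
 * number to userspace.
 */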
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uevent_object *uobj;
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_qp *qp;
	struct ib_qp_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	pd  = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
	rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
	srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;

	if (!pd  || pd->uobject->context  != file->ucontext ||
	    !scq || scq->uobject->context != file->ucontext ||
	    !rcq || rcq->uobject->context != file->ucontext ||
	    (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context = file;
	attr.send_cq = scq;
	attr.recv_cq = rcq;
	attr.srq = srq;
	attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type = cmd.qp_type;

	attr.cap.max_send_wr = cmd.max_send_wr;
	attr.cap.max_recv_wr = cmd.max_recv_wr;
	attr.cap.max_send_sge = cmd.max_send_sge;
	attr.cap.max_recv_sge = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context = file->ucontext;
	uobj->events_reported = 0;
	INIT_LIST_HEAD(&uobj->event_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_up;
	}

	qp->device = pd->device;
	qp->pd = pd;
	qp->send_cq = attr.send_cq;
	qp->recv_cq = attr.recv_cq;
	qp->srq = attr.srq;
	qp->uobject = &uobj->uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context = attr.qp_context;
	qp->qp_type = attr.qp_type;

	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;

retry:
	if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.qp_handle = uobj->uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_qp_idr, uobj->uobject.id);

err_destroy:
	ib_destroy_qp(qp);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}
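
/*
 * Perform a QP state transition: copy the attributes supplied by
 * userspace into an ib_qp_attr and hand them, together with the
 * attribute mask, to ib_modify_qp().
 */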
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_qp *qp;
	struct ib_qp_attr *attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state = cmd.qp_state;
	attr->cur_qp_state = cmd.cur_qp_state;
	attr->path_mtu = cmd.path_mtu;
	attr->path_mig_state = cmd.path_mig_state;
	attr->qkey = cmd.qkey;
	attr->rq_psn = cmd.rq_psn;
	attr->sq_psn = cmd.sq_psn;
	attr->dest_qp_num = cmd.dest_qp_num;
	attr->qp_access_flags = cmd.qp_access_flags;
	attr->pkey_index = cmd.pkey_index;
	attr->alt_pkey_index = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer = cmd.min_rnr_timer;
	attr->port_num = cmd.port_num;
	attr->timeout = cmd.timeout;
	attr->retry_cnt = cmd.retry_cnt;
	attr->rnr_retry = cmd.rnr_retry;
	attr->alt_port_num = cmd.alt_port_num;
	attr->alt_timeout = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid = cmd.dest.dlid;
	attr->ah_attr.sl = cmd.dest.sl;
	attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate = cmd.dest.static_rate;
	attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;

	ret = ib_modify_qp(qp, attr, cmd.attr_mask);
	if (ret)
		goto out;

	ret = in_len;

out:
	up(&ib_uverbs_idr_mutex);
	kfree(attr);

	return ret;
}
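
/*
 * Destroy a QP, free any asynchronous events still queued for it, and
 * report how many events were delivered for it.
 */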
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_qp *qp;
	struct ib_uevent_object *uobj;
	struct ib_uverbs_event *evt, *tmp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext)
		goto out;

	uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);

	ret = ib_destroy_qp(qp);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);

	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);

	resp.events_reported = uobj->events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
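
/*
 * Attach a QP to the multicast group identified by the given GID and
 * LID.
 */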
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp *qp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (qp && qp->uobject->context == file->ucontext)
		ret = ib_attach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);

	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
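
/*
 * Detach a QP from a multicast group.
 */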
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_qp *qp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (qp && qp->uobject->context == file->ucontext)
		ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);

	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
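
/*
 * Create a shared receive queue on the given PD and return its handle
 * to userspace.
 */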
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	struct ib_uevent_object *uobj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_srq_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler = ib_uverbs_srq_event_handler;
	attr.srq_context = file;
	attr.attr.max_wr = cmd.max_wr;
	attr.attr.max_sge = cmd.max_sge;
	attr.attr.srq_limit = cmd.srq_limit;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context = file->ucontext;
	uobj->events_reported = 0;
	INIT_LIST_HEAD(&uobj->event_list);

	srq = pd->device->create_srq(pd, &attr, &udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_up;
	}

	srq->device = pd->device;
	srq->pd = pd;
	srq->uobject = &uobj->uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context = attr.srq_context;
	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	memset(&resp, 0, sizeof resp);

retry:
	if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.srq_handle = uobj->uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_idr;
	}

	down(&file->mutex);
	list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
	up(&file->mutex);

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_idr:
	idr_remove(&ib_uverbs_srq_idr, uobj->uobject.id);

err_destroy:
	ib_destroy_srq(srq);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}
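
/*
 * Modify the attributes (work request limit and capacity) of an SRQ.
 */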
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_srq *srq;
	struct ib_srq_attr attr;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
	if (!srq || srq->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto out;
	}

	attr.max_wr = cmd.max_wr;
	attr.max_sge = cmd.max_sge;
	attr.srq_limit = cmd.srq_limit;

	ret = ib_modify_srq(srq, &attr, cmd.attr_mask);

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
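
/*
 * Destroy an SRQ, free any asynchronous events still queued for it, and
 * report how many events were delivered for it.
 */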
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_srq *srq;
	struct ib_uevent_object *uobj;
	struct ib_uverbs_event *evt, *tmp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	memset(&resp, 0, sizeof resp);

	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
	if (!srq || srq->uobject->context != file->ucontext)
		goto out;

	uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);

	ret = ib_destroy_srq(srq);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);

	down(&file->mutex);
	list_del(&uobj->uobject.list);
	up(&file->mutex);

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);

	resp.events_reported = uobj->events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}