/* uverbs_cmd.c */
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
 */
#include <asm/uaccess.h>

#include "uverbs.h"

/*
 * Describe the variable-length tail of a userspace command for the
 * low-level driver: any input bytes following the fixed command struct,
 * and any output space following the fixed response struct.  Classic
 * multi-statement macro wrapped in do { } while (0) so it behaves as a
 * single statement after `if` etc.
 */
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
	do {								\
		(udata)->inbuf  = (void __user *) (ibuf);		\
		(udata)->outbuf = (void __user *) (obuf);		\
		(udata)->inlen  = (ilen);				\
		(udata)->outlen = (olen);				\
	} while (0)
  44. ssize_t ib_uverbs_query_params(struct ib_uverbs_file *file,
  45. const char __user *buf,
  46. int in_len, int out_len)
  47. {
  48. struct ib_uverbs_query_params cmd;
  49. struct ib_uverbs_query_params_resp resp;
  50. if (out_len < sizeof resp)
  51. return -ENOSPC;
  52. if (copy_from_user(&cmd, buf, sizeof cmd))
  53. return -EFAULT;
  54. memset(&resp, 0, sizeof resp);
  55. resp.num_cq_events = file->device->num_comp;
  56. if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp))
  57. return -EFAULT;
  58. return in_len;
  59. }
  60. ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
  61. const char __user *buf,
  62. int in_len, int out_len)
  63. {
  64. struct ib_uverbs_get_context cmd;
  65. struct ib_uverbs_get_context_resp resp;
  66. struct ib_udata udata;
  67. struct ib_device *ibdev = file->device->ib_dev;
  68. int i;
  69. int ret = in_len;
  70. if (out_len < sizeof resp)
  71. return -ENOSPC;
  72. if (copy_from_user(&cmd, buf, sizeof cmd))
  73. return -EFAULT;
  74. INIT_UDATA(&udata, buf + sizeof cmd,
  75. (unsigned long) cmd.response + sizeof resp,
  76. in_len - sizeof cmd, out_len - sizeof resp);
  77. file->ucontext = ibdev->alloc_ucontext(ibdev, &udata);
  78. if (IS_ERR(file->ucontext)) {
  79. ret = PTR_ERR(file->ucontext);
  80. file->ucontext = NULL;
  81. return ret;
  82. }
  83. file->ucontext->device = ibdev;
  84. INIT_LIST_HEAD(&file->ucontext->pd_list);
  85. INIT_LIST_HEAD(&file->ucontext->mr_list);
  86. INIT_LIST_HEAD(&file->ucontext->mw_list);
  87. INIT_LIST_HEAD(&file->ucontext->cq_list);
  88. INIT_LIST_HEAD(&file->ucontext->qp_list);
  89. INIT_LIST_HEAD(&file->ucontext->srq_list);
  90. INIT_LIST_HEAD(&file->ucontext->ah_list);
  91. spin_lock_init(&file->ucontext->lock);
  92. resp.async_fd = file->async_file.fd;
  93. for (i = 0; i < file->device->num_comp; ++i)
  94. if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab +
  95. i * sizeof (__u32),
  96. &file->comp_file[i].fd, sizeof (__u32)))
  97. goto err;
  98. if (copy_to_user((void __user *) (unsigned long) cmd.response,
  99. &resp, sizeof resp))
  100. goto err;
  101. return in_len;
  102. err:
  103. ibdev->dealloc_ucontext(file->ucontext);
  104. file->ucontext = NULL;
  105. return -EFAULT;
  106. }
/*
 * QUERY_DEVICE command: query the HCA's device attributes and copy them
 * into the fixed userspace ABI struct, field by field.  The field-copy
 * order mirrors struct ib_uverbs_query_device_resp; the memset zeroes
 * reserved fields and padding before the copy to userspace.
 *
 * Returns in_len on success, -ENOSPC if the response buffer is too
 * small, -EFAULT on a bad user pointer, or the ib_query_device() error.
 */
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	/* Zero first: covers padding and any fields not set below. */
	memset(&resp, 0, sizeof resp);

	resp.fw_ver		       = attr.fw_ver;
	resp.node_guid		       = attr.node_guid;
	resp.sys_image_guid	       = attr.sys_image_guid;
	resp.max_mr_size	       = attr.max_mr_size;
	resp.page_size_cap	       = attr.page_size_cap;
	resp.vendor_id		       = attr.vendor_id;
	resp.vendor_part_id	       = attr.vendor_part_id;
	resp.hw_ver		       = attr.hw_ver;
	resp.max_qp		       = attr.max_qp;
	resp.max_qp_wr		       = attr.max_qp_wr;
	resp.device_cap_flags	       = attr.device_cap_flags;
	resp.max_sge		       = attr.max_sge;
	resp.max_sge_rd		       = attr.max_sge_rd;
	resp.max_cq		       = attr.max_cq;
	resp.max_cqe		       = attr.max_cqe;
	resp.max_mr		       = attr.max_mr;
	resp.max_pd		       = attr.max_pd;
	resp.max_qp_rd_atom	       = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom	       = attr.max_ee_rd_atom;
	resp.max_res_rd_atom	       = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
	resp.atomic_cap		       = attr.atomic_cap;
	resp.max_ee		       = attr.max_ee;
	resp.max_rdd		       = attr.max_rdd;
	resp.max_mw		       = attr.max_mw;
	resp.max_raw_ipv6_qp	       = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp	       = attr.max_raw_ethy_qp;
	resp.max_mcast_grp	       = attr.max_mcast_grp;
	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah		       = attr.max_ah;
	resp.max_fmr		       = attr.max_fmr;
	resp.max_map_per_fmr	       = attr.max_map_per_fmr;
	resp.max_srq		       = attr.max_srq;
	resp.max_srq_wr		       = attr.max_srq_wr;
	resp.max_srq_sge	       = attr.max_srq_sge;
	resp.max_pkeys		       = attr.max_pkeys;
	resp.local_ca_ack_delay	       = attr.local_ca_ack_delay;
	/* Port count comes from the device struct, not the attr query. */
	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
/*
 * QUERY_PORT command: query attributes of the port number supplied in
 * the command (validated by ib_query_port itself) and copy them into
 * the userspace ABI struct.
 *
 * Returns in_len on success, -ENOSPC if the response buffer is too
 * small, -EFAULT on a bad user pointer, or the ib_query_port() error.
 */
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	/* Zero first: covers padding and reserved fields. */
	memset(&resp, 0, sizeof resp);

	resp.state	     = attr.state;
	resp.max_mtu	     = attr.max_mtu;
	resp.active_mtu	     = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz	     = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid	     = attr.lid;
	resp.sm_lid	     = attr.sm_lid;
	resp.lmc	     = attr.lmc;
	resp.max_vl_num	     = attr.max_vl_num;
	resp.sm_sl	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state	     = attr.phys_state;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
  208. ssize_t ib_uverbs_query_gid(struct ib_uverbs_file *file,
  209. const char __user *buf,
  210. int in_len, int out_len)
  211. {
  212. struct ib_uverbs_query_gid cmd;
  213. struct ib_uverbs_query_gid_resp resp;
  214. int ret;
  215. if (out_len < sizeof resp)
  216. return -ENOSPC;
  217. if (copy_from_user(&cmd, buf, sizeof cmd))
  218. return -EFAULT;
  219. memset(&resp, 0, sizeof resp);
  220. ret = ib_query_gid(file->device->ib_dev, cmd.port_num, cmd.index,
  221. (union ib_gid *) resp.gid);
  222. if (ret)
  223. return ret;
  224. if (copy_to_user((void __user *) (unsigned long) cmd.response,
  225. &resp, sizeof resp))
  226. return -EFAULT;
  227. return in_len;
  228. }
  229. ssize_t ib_uverbs_query_pkey(struct ib_uverbs_file *file,
  230. const char __user *buf,
  231. int in_len, int out_len)
  232. {
  233. struct ib_uverbs_query_pkey cmd;
  234. struct ib_uverbs_query_pkey_resp resp;
  235. int ret;
  236. if (out_len < sizeof resp)
  237. return -ENOSPC;
  238. if (copy_from_user(&cmd, buf, sizeof cmd))
  239. return -EFAULT;
  240. memset(&resp, 0, sizeof resp);
  241. ret = ib_query_pkey(file->device->ib_dev, cmd.port_num, cmd.index,
  242. &resp.pkey);
  243. if (ret)
  244. return ret;
  245. if (copy_to_user((void __user *) (unsigned long) cmd.response,
  246. &resp, sizeof resp))
  247. return -EFAULT;
  248. return in_len;
  249. }
/*
 * ALLOC_PD command: allocate a protection domain via the driver, give
 * it an idr handle, link it into the context's PD list, and return the
 * handle to userspace.
 *
 * Error unwinding is strictly in reverse order of construction:
 *   err_list -> unlink + idr_remove, err_pd -> ib_dealloc_pd,
 *   err -> kfree(uobj).
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Driver-private bytes follow the fixed command/response structs. */
	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	uobj->context = file->ucontext;

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	/*
	 * Old-style idr protocol: preallocate outside the lock, retry
	 * the whole sequence if another preallocation raced us away
	 * (idr_get_new returns -EAGAIN).
	 */
retry:
	if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_pd;
	}

	down(&ib_uverbs_idr_mutex);
	ret = idr_get_new(&ib_uverbs_pd_idr, pd, &uobj->id);
	up(&ib_uverbs_idr_mutex);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_pd;

	spin_lock_irq(&file->ucontext->lock);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	spin_unlock_irq(&file->ucontext->lock);

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_list;
	}

	return in_len;

err_list:
	spin_lock_irq(&file->ucontext->lock);
	list_del(&uobj->list);
	spin_unlock_irq(&file->ucontext->lock);

	down(&ib_uverbs_idr_mutex);
	idr_remove(&ib_uverbs_pd_idr, uobj->id);
	up(&ib_uverbs_idr_mutex);

err_pd:
	ib_dealloc_pd(pd);

err:
	kfree(uobj);
	return ret;
}
/*
 * DEALLOC_PD command: look the PD handle up in the idr (under
 * ib_uverbs_idr_mutex), verify it belongs to this process's ucontext,
 * and tear it down.  The idr entry and list linkage are removed only
 * after ib_dealloc_pd() succeeds, so a busy PD stays visible.
 *
 * Returns in_len on success, -EFAULT on a bad user pointer, -EINVAL
 * for a bad/foreign handle, or the ib_dealloc_pd() error.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_pd *pd;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	/* Reject handles owned by other processes' contexts. */
	if (!pd || pd->uobject->context != file->ucontext)
		goto out;

	uobj = pd->uobject;

	ret = ib_dealloc_pd(pd);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);

	spin_lock_irq(&file->ucontext->lock);
	list_del(&uobj->list);
	spin_unlock_irq(&file->ucontext->lock);

	kfree(uobj);

out:
	up(&ib_uverbs_idr_mutex);
	return ret ? ret : in_len;
}
/*
 * REG_MR command: pin the userspace region [start, start+length) via
 * ib_umem_get(), register it with the driver against the given PD, and
 * return lkey/rkey plus an MR handle to userspace.
 *
 * Note the locking difference from alloc_pd: ib_uverbs_idr_mutex is
 * held across the whole idr retry loop and the copy_to_user, and the
 * err_* labels below assume it is held until err_up releases it.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_umem_object *obj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* The user VA and the HCA VA must share the same page offset. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->uobject.context = file->ucontext;

	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	ret = ib_umem_get(file->device->ib_dev, &obj->umem,
			  (void *) (unsigned long) cmd.start, cmd.length,
			  !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
	if (ret)
		goto err_free;

	obj->umem.virt_base = cmd.hca_va;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	/* Driver may not support userspace MRs at all. */
	if (!pd->device->reg_user_mr) {
		ret = -ENOSYS;
		goto err_up;
	}

	mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_up;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = &obj->uobject;

	/* MR pins its PD until deregistered. */
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	memset(&resp, 0, sizeof resp);
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	/* idr retry protocol; runs with ib_uverbs_idr_mutex held here. */
retry:
	if (!idr_pre_get(&ib_uverbs_mr_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_unreg;
	}

	ret = idr_get_new(&ib_uverbs_mr_idr, mr, &obj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_unreg;

	resp.mr_handle = obj->uobject.id;

	spin_lock_irq(&file->ucontext->lock);
	list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
	spin_unlock_irq(&file->ucontext->lock);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_list;
	}

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_list:
	spin_lock_irq(&file->ucontext->lock);
	list_del(&obj->uobject.list);
	spin_unlock_irq(&file->ucontext->lock);

err_unreg:
	ib_dereg_mr(mr);

err_up:
	up(&ib_uverbs_idr_mutex);

	ib_umem_release(file->device->ib_dev, &obj->umem);

err_free:
	kfree(obj);
	return ret;
}
/*
 * DEREG_MR command: deregister the MR named by the handle, then unpin
 * its umem pages and free the bookkeeping object.  The idr entry is
 * removed only after ib_dereg_mr() succeeds.
 *
 * Returns in_len on success, -EFAULT on a bad user pointer, -EINVAL
 * for a bad/foreign handle, or the ib_dereg_mr() error.
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_umem_object *memobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
	if (!mr || mr->uobject->context != file->ucontext)
		goto out;

	/* Recover the containing umem object from the embedded uobject. */
	memobj = container_of(mr->uobject, struct ib_umem_object, uobject);

	ret = ib_dereg_mr(mr);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);

	spin_lock_irq(&file->ucontext->lock);
	list_del(&memobj->uobject.list);
	spin_unlock_irq(&file->ucontext->lock);

	/* Unpin the user pages that were pinned at registration time. */
	ib_umem_release(file->device->ib_dev, &memobj->umem);
	kfree(memobj);

out:
	up(&ib_uverbs_idr_mutex);
	return ret ? ret : in_len;
}
/*
 * CREATE_CQ command: create a completion queue via the driver, wire up
 * the uverbs completion/async event handlers, give it an idr handle,
 * and return the handle plus the actual CQE count to userspace.
 *
 * cmd.event_handler selects which completion-event channel delivers
 * this CQ's events, so it must be below the device's channel count.
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata udata;
	struct ib_ucq_object *uobj;
	struct ib_cq *cq;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* Completion channel index must be within range. */
	if (cmd.event_handler >= file->device->num_comp)
		return -EINVAL;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	uobj->uobject.user_handle   = cmd.user_handle;
	uobj->uobject.context       = file->ucontext;
	uobj->comp_events_reported  = 0;
	uobj->async_events_reported = 0;
	INIT_LIST_HEAD(&uobj->comp_list);
	INIT_LIST_HEAD(&uobj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &uobj->uobject;
	/* Route completion and async events through the uverbs layer. */
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = file;
	atomic_set(&cq->usecnt, 0);

	/*
	 * Old-style idr protocol: preallocate outside the lock, retry
	 * on -EAGAIN if another preallocation raced us away.
	 */
retry:
	if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_cq;
	}

	down(&ib_uverbs_idr_mutex);
	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);
	up(&ib_uverbs_idr_mutex);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_cq;

	spin_lock_irq(&file->ucontext->lock);
	list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
	spin_unlock_irq(&file->ucontext->lock);

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = uobj->uobject.id;
	/* Driver may round cqe up; report what was actually allocated. */
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_list;
	}

	return in_len;

err_list:
	spin_lock_irq(&file->ucontext->lock);
	list_del(&uobj->uobject.list);
	spin_unlock_irq(&file->ucontext->lock);

	down(&ib_uverbs_idr_mutex);
	idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
	up(&ib_uverbs_idr_mutex);

err_cq:
	ib_destroy_cq(cq);

err:
	kfree(uobj);
	return ret;
}
  542. ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
  543. const char __user *buf, int in_len,
  544. int out_len)
  545. {
  546. struct ib_uverbs_destroy_cq cmd;
  547. struct ib_uverbs_destroy_cq_resp resp;
  548. struct ib_cq *cq;
  549. struct ib_ucq_object *uobj;
  550. struct ib_uverbs_event *evt, *tmp;
  551. u64 user_handle;
  552. int ret = -EINVAL;
  553. if (copy_from_user(&cmd, buf, sizeof cmd))
  554. return -EFAULT;
  555. memset(&resp, 0, sizeof resp);
  556. down(&ib_uverbs_idr_mutex);
  557. cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
  558. if (!cq || cq->uobject->context != file->ucontext)
  559. goto out;
  560. user_handle = cq->uobject->user_handle;
  561. uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
  562. ret = ib_destroy_cq(cq);
  563. if (ret)
  564. goto out;
  565. idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
  566. spin_lock_irq(&file->ucontext->lock);
  567. list_del(&uobj->uobject.list);
  568. spin_unlock_irq(&file->ucontext->lock);
  569. spin_lock_irq(&file->comp_file[0].lock);
  570. list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
  571. list_del(&evt->list);
  572. kfree(evt);
  573. }
  574. spin_unlock_irq(&file->comp_file[0].lock);
  575. spin_lock_irq(&file->async_file.lock);
  576. list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
  577. list_del(&evt->list);
  578. kfree(evt);
  579. }
  580. spin_unlock_irq(&file->async_file.lock);
  581. resp.comp_events_reported = uobj->comp_events_reported;
  582. resp.async_events_reported = uobj->async_events_reported;
  583. kfree(uobj);
  584. if (copy_to_user((void __user *) (unsigned long) cmd.response,
  585. &resp, sizeof resp))
  586. ret = -EFAULT;
  587. out:
  588. up(&ib_uverbs_idr_mutex);
  589. return ret ? ret : in_len;
  590. }
/*
 * CREATE_QP command: resolve the PD, send/recv CQ, and optional SRQ
 * handles (all under one hold of ib_uverbs_idr_mutex so none can be
 * destroyed mid-validation), create the QP via the driver, bump the
 * use counts of everything it references, and return the QP number and
 * handle to userspace.
 *
 * ib_uverbs_idr_mutex is held from the handle lookups all the way
 * through the idr insertion and copy_to_user; err_up releases it.
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata udata;
	struct ib_uevent_object *uobj;
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_qp *qp;
	struct ib_qp_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	pd  = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
	rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
	srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;

	/* Every referenced object must exist and belong to this context. */
	if (!pd  || pd->uobject->context  != file->ucontext ||
	    !scq || scq->uobject->context != file->ucontext ||
	    !rcq || rcq->uobject->context != file->ucontext ||
	    (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context     = file->ucontext;
	uobj->events_reported     = 0;
	INIT_LIST_HEAD(&uobj->event_list);

	qp = pd->device->create_qp(pd, &attr, &udata);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_up;
	}

	qp->device        = pd->device;
	qp->pd            = pd;
	qp->send_cq       = attr.send_cq;
	qp->recv_cq       = attr.recv_cq;
	qp->srq           = attr.srq;
	qp->uobject       = &uobj->uobject;
	qp->event_handler = attr.event_handler;
	qp->qp_context    = attr.qp_context;
	qp->qp_type       = attr.qp_type;

	/* The QP pins its PD, both CQs, and the SRQ (if any). */
	atomic_inc(&pd->usecnt);
	atomic_inc(&attr.send_cq->usecnt);
	atomic_inc(&attr.recv_cq->usecnt);
	if (attr.srq)
		atomic_inc(&attr.srq->usecnt);

	memset(&resp, 0, sizeof resp);
	resp.qpn = qp->qp_num;

	/* idr retry protocol; runs with ib_uverbs_idr_mutex held here. */
retry:
	if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.qp_handle = uobj->uobject.id;

	spin_lock_irq(&file->ucontext->lock);
	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
	spin_unlock_irq(&file->ucontext->lock);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_list;
	}

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_list:
	spin_lock_irq(&file->ucontext->lock);
	list_del(&uobj->uobject.list);
	spin_unlock_irq(&file->ucontext->lock);

err_destroy:
	ib_destroy_qp(qp);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}
  696. ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
  697. const char __user *buf, int in_len,
  698. int out_len)
  699. {
  700. struct ib_uverbs_modify_qp cmd;
  701. struct ib_qp *qp;
  702. struct ib_qp_attr *attr;
  703. int ret;
  704. if (copy_from_user(&cmd, buf, sizeof cmd))
  705. return -EFAULT;
  706. attr = kmalloc(sizeof *attr, GFP_KERNEL);
  707. if (!attr)
  708. return -ENOMEM;
  709. down(&ib_uverbs_idr_mutex);
  710. qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
  711. if (!qp || qp->uobject->context != file->ucontext) {
  712. ret = -EINVAL;
  713. goto out;
  714. }
  715. attr->qp_state = cmd.qp_state;
  716. attr->cur_qp_state = cmd.cur_qp_state;
  717. attr->path_mtu = cmd.path_mtu;
  718. attr->path_mig_state = cmd.path_mig_state;
  719. attr->qkey = cmd.qkey;
  720. attr->rq_psn = cmd.rq_psn;
  721. attr->sq_psn = cmd.sq_psn;
  722. attr->dest_qp_num = cmd.dest_qp_num;
  723. attr->qp_access_flags = cmd.qp_access_flags;
  724. attr->pkey_index = cmd.pkey_index;
  725. attr->alt_pkey_index = cmd.pkey_index;
  726. attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
  727. attr->max_rd_atomic = cmd.max_rd_atomic;
  728. attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
  729. attr->min_rnr_timer = cmd.min_rnr_timer;
  730. attr->port_num = cmd.port_num;
  731. attr->timeout = cmd.timeout;
  732. attr->retry_cnt = cmd.retry_cnt;
  733. attr->rnr_retry = cmd.rnr_retry;
  734. attr->alt_port_num = cmd.alt_port_num;
  735. attr->alt_timeout = cmd.alt_timeout;
  736. memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
  737. attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
  738. attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
  739. attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
  740. attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
  741. attr->ah_attr.dlid = cmd.dest.dlid;
  742. attr->ah_attr.sl = cmd.dest.sl;
  743. attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
  744. attr->ah_attr.static_rate = cmd.dest.static_rate;
  745. attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
  746. attr->ah_attr.port_num = cmd.dest.port_num;
  747. memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
  748. attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
  749. attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
  750. attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
  751. attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
  752. attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
  753. attr->alt_ah_attr.sl = cmd.alt_dest.sl;
  754. attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
  755. attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
  756. attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
  757. attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
  758. ret = ib_modify_qp(qp, attr, cmd.attr_mask);
  759. if (ret)
  760. goto out;
  761. ret = in_len;
  762. out:
  763. up(&ib_uverbs_idr_mutex);
  764. kfree(attr);
  765. return ret;
  766. }
/*
 * DESTROY_QP command: destroy the QP named by the handle, drop any of
 * its async events still queued on the async event file, and report
 * how many events were delivered so userspace can account for
 * in-flight ones.
 *
 * Returns in_len on success, -EFAULT / -EINVAL, or the
 * ib_destroy_qp() error (a busy QP stays registered).
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_qp *qp;
	struct ib_uevent_object *uobj;
	struct ib_uverbs_event *evt, *tmp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	down(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext)
		goto out;

	uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);

	ret = ib_destroy_qp(qp);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);

	spin_lock_irq(&file->ucontext->lock);
	list_del(&uobj->uobject.list);
	spin_unlock_irq(&file->ucontext->lock);

	/* Discard async events still queued for this QP. */
	spin_lock_irq(&file->async_file.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file.lock);

	resp.events_reported = uobj->events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);
	return ret ? ret : in_len;
}
  807. ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
  808. const char __user *buf, int in_len,
  809. int out_len)
  810. {
  811. struct ib_uverbs_attach_mcast cmd;
  812. struct ib_qp *qp;
  813. int ret = -EINVAL;
  814. if (copy_from_user(&cmd, buf, sizeof cmd))
  815. return -EFAULT;
  816. down(&ib_uverbs_idr_mutex);
  817. qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
  818. if (qp && qp->uobject->context == file->ucontext)
  819. ret = ib_attach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
  820. up(&ib_uverbs_idr_mutex);
  821. return ret ? ret : in_len;
  822. }
  823. ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
  824. const char __user *buf, int in_len,
  825. int out_len)
  826. {
  827. struct ib_uverbs_detach_mcast cmd;
  828. struct ib_qp *qp;
  829. int ret = -EINVAL;
  830. if (copy_from_user(&cmd, buf, sizeof cmd))
  831. return -EFAULT;
  832. down(&ib_uverbs_idr_mutex);
  833. qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
  834. if (qp && qp->uobject->context == file->ucontext)
  835. ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
  836. up(&ib_uverbs_idr_mutex);
  837. return ret ? ret : in_len;
  838. }
/*
 * Create a shared receive queue on the PD named by cmd.pd_handle and
 * return its new handle to userspace in resp.srq_handle.
 *
 * On any failure after the driver SRQ exists, the error path unwinds in
 * reverse order of construction: unlink from the per-context list
 * (err_list), destroy the SRQ (err_destroy), then drop the idr mutex
 * and free the uobject (err_up).
 *
 * Returns in_len on success; -ENOSPC if the user response buffer is too
 * small, -EFAULT on bad user pointers, -ENOMEM on allocation failure,
 * -EINVAL on a stale/foreign PD handle, or the driver's create error.
 */
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata udata;
	struct ib_uevent_object *uobj;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct ib_srq_init_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* Driver-private command/response tail follows the fixed structs. */
	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	down(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);

	if (!pd || pd->uobject->context != file->ucontext) {
		ret = -EINVAL;
		goto err_up;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.attr.max_wr    = cmd.max_wr;
	attr.attr.max_sge   = cmd.max_sge;
	attr.attr.srq_limit = cmd.srq_limit;

	uobj->uobject.user_handle = cmd.user_handle;
	uobj->uobject.context     = file->ucontext;
	uobj->events_reported     = 0;
	INIT_LIST_HEAD(&uobj->event_list);

	srq = pd->device->create_srq(pd, &attr, &udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_up;
	}

	/* Core bookkeeping the driver's create_srq does not fill in. */
	srq->device        = pd->device;
	srq->pd            = pd;
	srq->uobject       = &uobj->uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;
	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	memset(&resp, 0, sizeof resp);

retry:
	/* idr_get_new() can transiently fail; preload and retry on -EAGAIN. */
	if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_destroy;
	}

	ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);

	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		goto err_destroy;

	resp.srq_handle = uobj->uobject.id;

	spin_lock_irq(&file->ucontext->lock);
	list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
	spin_unlock_irq(&file->ucontext->lock);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_list;
	}

	up(&ib_uverbs_idr_mutex);

	return in_len;

err_list:
	spin_lock_irq(&file->ucontext->lock);
	list_del(&uobj->uobject.list);
	spin_unlock_irq(&file->ucontext->lock);

err_destroy:
	ib_destroy_srq(srq);

err_up:
	up(&ib_uverbs_idr_mutex);

	kfree(uobj);
	return ret;
}
  921. ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
  922. const char __user *buf, int in_len,
  923. int out_len)
  924. {
  925. struct ib_uverbs_modify_srq cmd;
  926. struct ib_srq *srq;
  927. struct ib_srq_attr attr;
  928. int ret;
  929. if (copy_from_user(&cmd, buf, sizeof cmd))
  930. return -EFAULT;
  931. down(&ib_uverbs_idr_mutex);
  932. srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
  933. if (!srq || srq->uobject->context != file->ucontext) {
  934. ret = -EINVAL;
  935. goto out;
  936. }
  937. attr.max_wr = cmd.max_wr;
  938. attr.max_sge = cmd.max_sge;
  939. attr.srq_limit = cmd.srq_limit;
  940. ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
  941. out:
  942. up(&ib_uverbs_idr_mutex);
  943. return ret ? ret : in_len;
  944. }
/*
 * Destroy the SRQ named by cmd.srq_handle and tell userspace how many
 * asynchronous events were already reported for it, so the consumer can
 * account for events it will never receive.
 *
 * Mirrors ib_uverbs_destroy_qp(): under ib_uverbs_idr_mutex the SRQ is
 * destroyed, its handle removed from the idr only on success, the
 * uobject unlinked from the per-context list, and any undelivered async
 * events discarded.
 *
 * Returns in_len on success, or a negative errno (-EFAULT on bad user
 * pointers, -EINVAL on a stale/foreign handle, or the ib_destroy_srq
 * error).
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_srq *srq;
	struct ib_uevent_object *uobj;
	struct ib_uverbs_event *evt, *tmp;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	down(&ib_uverbs_idr_mutex);

	memset(&resp, 0, sizeof resp);

	/* Handle must exist and belong to this process's context. */
	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
	if (!srq || srq->uobject->context != file->ucontext)
		goto out;

	uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);

	ret = ib_destroy_srq(srq);
	if (ret)
		goto out;

	/* Unhash the handle only once the SRQ is actually gone. */
	idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);

	spin_lock_irq(&file->ucontext->lock);
	list_del(&uobj->uobject.list);
	spin_unlock_irq(&file->ucontext->lock);

	/*
	 * Free async events still queued for this SRQ; unlink each from
	 * the file's event queue (evt->list) before freeing.
	 */
	spin_lock_irq(&file->async_file.lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file.lock);

	resp.events_reported = uobj->events_reported;

	kfree(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	up(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}