uverbs_cmd.c

/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
 */

#include <asm/uaccess.h>

#include "uverbs.h"
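/*
 * INIT_UDATA() packs the user-space input/output buffers and their
 * lengths into an ib_udata structure, which is passed through to the
 * low-level driver so it can consume any command-specific data that
 * follows the common header.
 */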
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)       \
        do {                                            \
                (udata)->inbuf  = (void __user *) (ibuf); \
                (udata)->outbuf = (void __user *) (obuf); \
                (udata)->inlen  = (ilen);               \
                (udata)->outlen = (olen);               \
        } while (0)
ssize_t ib_uverbs_query_params(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
{
        struct ib_uverbs_query_params cmd;
        struct ib_uverbs_query_params_resp resp;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        resp.num_cq_events = file->device->num_comp;

        if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
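/*
 * ib_uverbs_get_context() allocates the verbs context for this file,
 * initializes its per-object lists, and returns the async event and
 * completion event file descriptors to userspace.  A file may own at
 * most one context, so a second call fails with -EINVAL.
 */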
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
                              const char __user *buf,
                              int in_len, int out_len)
{
        struct ib_uverbs_get_context cmd;
        struct ib_uverbs_get_context_resp resp;
        struct ib_udata udata;
        struct ib_device *ibdev = file->device->ib_dev;
        struct ib_ucontext *ucontext;
        int i;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&file->mutex);

        if (file->ucontext) {
                ret = -EINVAL;
                goto err;
        }

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        ucontext = ibdev->alloc_ucontext(ibdev, &udata);
        if (IS_ERR(ucontext)) {
                /* Return the error from the failed allocation and drop
                 * the file mutex taken above. */
                ret = PTR_ERR(ucontext);
                goto err;
        }
  86. ucontext->device = ibdev;
  87. INIT_LIST_HEAD(&ucontext->pd_list);
  88. INIT_LIST_HEAD(&ucontext->mr_list);
  89. INIT_LIST_HEAD(&ucontext->mw_list);
  90. INIT_LIST_HEAD(&ucontext->cq_list);
  91. INIT_LIST_HEAD(&ucontext->qp_list);
  92. INIT_LIST_HEAD(&ucontext->srq_list);
  93. INIT_LIST_HEAD(&ucontext->ah_list);
  94. resp.async_fd = file->async_file.fd;
  95. for (i = 0; i < file->device->num_comp; ++i)
  96. if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab +
  97. i * sizeof (__u32),
  98. &file->comp_file[i].fd, sizeof (__u32))) {
  99. ret = -EFAULT;
  100. goto err_free;
  101. }
  102. if (copy_to_user((void __user *) (unsigned long) cmd.response,
  103. &resp, sizeof resp)) {
  104. ret = -EFAULT;
  105. goto err_free;
  106. }
  107. file->ucontext = ucontext;
  108. up(&file->mutex);
  109. return in_len;
  110. err_free:
  111. ibdev->dealloc_ucontext(ucontext);
  112. err:
  113. up(&file->mutex);
  114. return ret;
  115. }
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
{
        struct ib_uverbs_query_device cmd;
        struct ib_uverbs_query_device_resp resp;
        struct ib_device_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_device(file->device->ib_dev, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.fw_ver = attr.fw_ver;
        resp.node_guid = attr.node_guid;
        resp.sys_image_guid = attr.sys_image_guid;
        resp.max_mr_size = attr.max_mr_size;
        resp.page_size_cap = attr.page_size_cap;
        resp.vendor_id = attr.vendor_id;
        resp.vendor_part_id = attr.vendor_part_id;
        resp.hw_ver = attr.hw_ver;
        resp.max_qp = attr.max_qp;
        resp.max_qp_wr = attr.max_qp_wr;
        resp.device_cap_flags = attr.device_cap_flags;
        resp.max_sge = attr.max_sge;
        resp.max_sge_rd = attr.max_sge_rd;
        resp.max_cq = attr.max_cq;
        resp.max_cqe = attr.max_cqe;
        resp.max_mr = attr.max_mr;
        resp.max_pd = attr.max_pd;
        resp.max_qp_rd_atom = attr.max_qp_rd_atom;
        resp.max_ee_rd_atom = attr.max_ee_rd_atom;
        resp.max_res_rd_atom = attr.max_res_rd_atom;
        resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
        resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
        resp.atomic_cap = attr.atomic_cap;
        resp.max_ee = attr.max_ee;
        resp.max_rdd = attr.max_rdd;
        resp.max_mw = attr.max_mw;
        resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
        resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
        resp.max_mcast_grp = attr.max_mcast_grp;
        resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
        resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
        resp.max_ah = attr.max_ah;
        resp.max_fmr = attr.max_fmr;
        resp.max_map_per_fmr = attr.max_map_per_fmr;
        resp.max_srq = attr.max_srq;
        resp.max_srq_wr = attr.max_srq_wr;
        resp.max_srq_sge = attr.max_srq_sge;
        resp.max_pkeys = attr.max_pkeys;
        resp.local_ca_ack_delay = attr.local_ca_ack_delay;
        resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_query_port cmd;
        struct ib_uverbs_query_port_resp resp;
        struct ib_port_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.state = attr.state;
        resp.max_mtu = attr.max_mtu;
        resp.active_mtu = attr.active_mtu;
        resp.gid_tbl_len = attr.gid_tbl_len;
        resp.port_cap_flags = attr.port_cap_flags;
        resp.max_msg_sz = attr.max_msg_sz;
        resp.bad_pkey_cntr = attr.bad_pkey_cntr;
        resp.qkey_viol_cntr = attr.qkey_viol_cntr;
        resp.pkey_tbl_len = attr.pkey_tbl_len;
        resp.lid = attr.lid;
        resp.sm_lid = attr.sm_lid;
        resp.lmc = attr.lmc;
        resp.max_vl_num = attr.max_vl_num;
        resp.sm_sl = attr.sm_sl;
        resp.subnet_timeout = attr.subnet_timeout;
        resp.init_type_reply = attr.init_type_reply;
        resp.active_width = attr.active_width;
        resp.active_speed = attr.active_speed;
        resp.phys_state = attr.phys_state;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
ssize_t ib_uverbs_query_gid(struct ib_uverbs_file *file,
                            const char __user *buf,
                            int in_len, int out_len)
{
        struct ib_uverbs_query_gid cmd;
        struct ib_uverbs_query_gid_resp resp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        ret = ib_query_gid(file->device->ib_dev, cmd.port_num, cmd.index,
                           (union ib_gid *) resp.gid);
        if (ret)
                return ret;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
ssize_t ib_uverbs_query_pkey(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_query_pkey cmd;
        struct ib_uverbs_query_pkey_resp resp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        ret = ib_query_pkey(file->device->ib_dev, cmd.port_num, cmd.index,
                            &resp.pkey);
        if (ret)
                return ret;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}
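/*
 * ib_uverbs_alloc_pd() allocates a protection domain, registers it in
 * the PD idr, and returns the resulting handle to userspace.
 */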
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
                           const char __user *buf,
                           int in_len, int out_len)
{
        struct ib_uverbs_alloc_pd cmd;
        struct ib_uverbs_alloc_pd_resp resp;
        struct ib_udata udata;
        struct ib_uobject *uobj;
        struct ib_pd *pd;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        uobj->context = file->ucontext;

        pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
                                            file->ucontext, &udata);
        if (IS_ERR(pd)) {
                ret = PTR_ERR(pd);
                goto err;
        }

        pd->device = file->device->ib_dev;
        pd->uobject = uobj;
        atomic_set(&pd->usecnt, 0);

retry:
        if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_pd;
        }

        down(&ib_uverbs_idr_mutex);
        ret = idr_get_new(&ib_uverbs_pd_idr, pd, &uobj->id);
        up(&ib_uverbs_idr_mutex);

        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_pd;

        down(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->pd_list);
        up(&file->mutex);

        memset(&resp, 0, sizeof resp);
        resp.pd_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_list;
        }

        return in_len;

err_list:
        down(&file->mutex);
        list_del(&uobj->list);
        up(&file->mutex);

        down(&ib_uverbs_idr_mutex);
        idr_remove(&ib_uverbs_pd_idr, uobj->id);
        up(&ib_uverbs_idr_mutex);

err_pd:
        ib_dealloc_pd(pd);

err:
        kfree(uobj);
        return ret;
}
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_dealloc_pd cmd;
        struct ib_pd *pd;
        struct ib_uobject *uobj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext)
                goto out;

        uobj = pd->uobject;

        ret = ib_dealloc_pd(pd);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);

        down(&file->mutex);
        list_del(&uobj->list);
        up(&file->mutex);

        kfree(uobj);

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
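/*
 * ib_uverbs_reg_mr() pins the user buffer described by the command,
 * registers it as a memory region on the requested PD, and returns
 * the new MR handle along with its lkey and rkey.
 */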
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
                         const char __user *buf, int in_len,
                         int out_len)
{
        struct ib_uverbs_reg_mr cmd;
        struct ib_uverbs_reg_mr_resp resp;
        struct ib_udata udata;
        struct ib_umem_object *obj;
        struct ib_pd *pd;
        struct ib_mr *mr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        obj->uobject.context = file->ucontext;

        /*
         * We ask for writable memory if any access flags other than
         * "remote read" are set.  "Local write" and "remote write"
         * obviously require write access.  "Remote atomic" can do
         * things like fetch and add, which will modify memory, and
         * "MW bind" can change permissions by binding a window.
         */
        ret = ib_umem_get(file->device->ib_dev, &obj->umem,
                          (void *) (unsigned long) cmd.start, cmd.length,
                          !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
        if (ret)
                goto err_free;

        obj->umem.virt_base = cmd.hca_va;

        down(&ib_uverbs_idr_mutex);

        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto err_up;
        }

        if (!pd->device->reg_user_mr) {
                ret = -ENOSYS;
                goto err_up;
        }

        mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto err_up;
        }

        mr->device = pd->device;
        mr->pd = pd;
        mr->uobject = &obj->uobject;
        atomic_inc(&pd->usecnt);
        atomic_set(&mr->usecnt, 0);

        memset(&resp, 0, sizeof resp);
        resp.lkey = mr->lkey;
        resp.rkey = mr->rkey;

retry:
        if (!idr_pre_get(&ib_uverbs_mr_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_unreg;
        }

        ret = idr_get_new(&ib_uverbs_mr_idr, mr, &obj->uobject.id);
        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_unreg;

        resp.mr_handle = obj->uobject.id;

        down(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
        up(&file->mutex);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_list;
        }

        up(&ib_uverbs_idr_mutex);

        return in_len;

err_list:
        down(&file->mutex);
        list_del(&obj->uobject.list);
        up(&file->mutex);

err_unreg:
        ib_dereg_mr(mr);

err_up:
        up(&ib_uverbs_idr_mutex);

        ib_umem_release(file->device->ib_dev, &obj->umem);

err_free:
        kfree(obj);
        return ret;
}
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_dereg_mr cmd;
        struct ib_mr *mr;
        struct ib_umem_object *memobj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
        if (!mr || mr->uobject->context != file->ucontext)
                goto out;

        memobj = container_of(mr->uobject, struct ib_umem_object, uobject);

        ret = ib_dereg_mr(mr);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);

        down(&file->mutex);
        list_del(&memobj->uobject.list);
        up(&file->mutex);

        ib_umem_release(file->device->ib_dev, &memobj->umem);
        kfree(memobj);

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
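/*
 * ib_uverbs_create_cq() creates a completion queue for this context
 * and returns its handle and the actual number of entries allocated.
 */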
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_cq cmd;
        struct ib_uverbs_create_cq_resp resp;
        struct ib_udata udata;
        struct ib_ucq_object *uobj;
        struct ib_cq *cq;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if (cmd.event_handler >= file->device->num_comp)
                return -EINVAL;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        uobj->uobject.user_handle = cmd.user_handle;
        uobj->uobject.context = file->ucontext;
        uobj->comp_events_reported = 0;
        uobj->async_events_reported = 0;
        INIT_LIST_HEAD(&uobj->comp_list);
        INIT_LIST_HEAD(&uobj->async_list);

        cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
                                             file->ucontext, &udata);
        if (IS_ERR(cq)) {
                ret = PTR_ERR(cq);
                goto err;
        }

        cq->device = file->device->ib_dev;
        cq->uobject = &uobj->uobject;
        cq->comp_handler = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context = file;
        atomic_set(&cq->usecnt, 0);

retry:
        if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_cq;
        }

        down(&ib_uverbs_idr_mutex);
        ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);
        up(&ib_uverbs_idr_mutex);

        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_cq;

        down(&file->mutex);
        list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
        up(&file->mutex);

        memset(&resp, 0, sizeof resp);
        resp.cq_handle = uobj->uobject.id;
        resp.cqe = cq->cqe;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_list;
        }

        return in_len;

err_list:
        down(&file->mutex);
        list_del(&uobj->uobject.list);
        up(&file->mutex);

        down(&ib_uverbs_idr_mutex);
        idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
        up(&ib_uverbs_idr_mutex);

err_cq:
        ib_destroy_cq(cq);

err:
        kfree(uobj);
        return ret;
}
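/*
 * ib_uverbs_destroy_cq() destroys a CQ, discards any completion and
 * async events still queued for it, and reports how many events were
 * already delivered so userspace can account for them.
 */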
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_destroy_cq cmd;
        struct ib_uverbs_destroy_cq_resp resp;
        struct ib_cq *cq;
        struct ib_ucq_object *uobj;
        struct ib_uverbs_event *evt, *tmp;
        u64 user_handle;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        down(&ib_uverbs_idr_mutex);

        cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
        if (!cq || cq->uobject->context != file->ucontext)
                goto out;

        user_handle = cq->uobject->user_handle;
        uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

        ret = ib_destroy_cq(cq);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);

        down(&file->mutex);
        list_del(&uobj->uobject.list);
        up(&file->mutex);

        spin_lock_irq(&file->comp_file[0].lock);
        list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->comp_file[0].lock);

        spin_lock_irq(&file->async_file.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file.lock);

        resp.comp_events_reported = uobj->comp_events_reported;
        resp.async_events_reported = uobj->async_events_reported;

        kfree(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
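/*
 * ib_uverbs_create_qp() creates a queue pair on the given PD, send and
 * receive CQs and optional SRQ, and returns the QP number and handle
 * to userspace.
 */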
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_qp cmd;
        struct ib_uverbs_create_qp_resp resp;
        struct ib_udata udata;
        struct ib_uevent_object *uobj;
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        struct ib_qp *qp;
        struct ib_qp_init_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        down(&ib_uverbs_idr_mutex);

        pd  = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
        rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
        srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;

        if (!pd || pd->uobject->context != file->ucontext ||
            !scq || scq->uobject->context != file->ucontext ||
            !rcq || rcq->uobject->context != file->ucontext ||
            (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
                ret = -EINVAL;
                goto err_up;
        }

        attr.event_handler = ib_uverbs_qp_event_handler;
        attr.qp_context = file;
        attr.send_cq = scq;
        attr.recv_cq = rcq;
        attr.srq = srq;
        attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
        attr.qp_type = cmd.qp_type;

        attr.cap.max_send_wr = cmd.max_send_wr;
        attr.cap.max_recv_wr = cmd.max_recv_wr;
        attr.cap.max_send_sge = cmd.max_send_sge;
        attr.cap.max_recv_sge = cmd.max_recv_sge;
        attr.cap.max_inline_data = cmd.max_inline_data;

        uobj->uobject.user_handle = cmd.user_handle;
        uobj->uobject.context = file->ucontext;
        uobj->events_reported = 0;
        INIT_LIST_HEAD(&uobj->event_list);

        qp = pd->device->create_qp(pd, &attr, &udata);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_up;
        }

        qp->device = pd->device;
        qp->pd = pd;
        qp->send_cq = attr.send_cq;
        qp->recv_cq = attr.recv_cq;
        qp->srq = attr.srq;
        qp->uobject = &uobj->uobject;
        qp->event_handler = attr.event_handler;
        qp->qp_context = attr.qp_context;
        qp->qp_type = attr.qp_type;
        atomic_inc(&pd->usecnt);
        atomic_inc(&attr.send_cq->usecnt);
        atomic_inc(&attr.recv_cq->usecnt);
        if (attr.srq)
                atomic_inc(&attr.srq->usecnt);

        memset(&resp, 0, sizeof resp);
        resp.qpn = qp->qp_num;

retry:
        if (!idr_pre_get(&ib_uverbs_qp_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_destroy;
        }

        ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);
        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_destroy;

        resp.qp_handle = uobj->uobject.id;

        down(&file->mutex);
        list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
        up(&file->mutex);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_list;
        }

        up(&ib_uverbs_idr_mutex);

        return in_len;

err_list:
        down(&file->mutex);
        list_del(&uobj->uobject.list);
        up(&file->mutex);

err_destroy:
        ib_destroy_qp(qp);

err_up:
        up(&ib_uverbs_idr_mutex);

        kfree(uobj);
        return ret;
}
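/*
 * ib_uverbs_modify_qp() translates the command into an ib_qp_attr
 * structure and calls ib_modify_qp() with the caller's attribute mask.
 */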
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_modify_qp cmd;
        struct ib_qp *qp;
        struct ib_qp_attr *attr;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        down(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto out;
        }

        attr->qp_state = cmd.qp_state;
        attr->cur_qp_state = cmd.cur_qp_state;
        attr->path_mtu = cmd.path_mtu;
        attr->path_mig_state = cmd.path_mig_state;
        attr->qkey = cmd.qkey;
        attr->rq_psn = cmd.rq_psn;
        attr->sq_psn = cmd.sq_psn;
        attr->dest_qp_num = cmd.dest_qp_num;
        attr->qp_access_flags = cmd.qp_access_flags;
        attr->pkey_index = cmd.pkey_index;
  734. attr->alt_pkey_index = cmd.pkey_index;
  735. attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
  736. attr->max_rd_atomic = cmd.max_rd_atomic;
  737. attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
  738. attr->min_rnr_timer = cmd.min_rnr_timer;
  739. attr->port_num = cmd.port_num;
  740. attr->timeout = cmd.timeout;
  741. attr->retry_cnt = cmd.retry_cnt;
  742. attr->rnr_retry = cmd.rnr_retry;
  743. attr->alt_port_num = cmd.alt_port_num;
  744. attr->alt_timeout = cmd.alt_timeout;
  745. memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
  746. attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
  747. attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
  748. attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
  749. attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
  750. attr->ah_attr.dlid = cmd.dest.dlid;
  751. attr->ah_attr.sl = cmd.dest.sl;
  752. attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
  753. attr->ah_attr.static_rate = cmd.dest.static_rate;
  754. attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
  755. attr->ah_attr.port_num = cmd.dest.port_num;
  756. memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
  757. attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
  758. attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
  759. attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
  760. attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
  761. attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
  762. attr->alt_ah_attr.sl = cmd.alt_dest.sl;
  763. attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
  764. attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
  765. attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
  766. attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
  767. ret = ib_modify_qp(qp, attr, cmd.attr_mask);
  768. if (ret)
  769. goto out;
  770. ret = in_len;
  771. out:
  772. up(&ib_uverbs_idr_mutex);
  773. kfree(attr);
  774. return ret;
  775. }
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_destroy_qp cmd;
        struct ib_uverbs_destroy_qp_resp resp;
        struct ib_qp *qp;
        struct ib_uevent_object *uobj;
        struct ib_uverbs_event *evt, *tmp;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        down(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
                goto out;

        uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);

        ret = ib_destroy_qp(qp);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);

        down(&file->mutex);
        list_del(&uobj->uobject.list);
        up(&file->mutex);

        spin_lock_irq(&file->async_file.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file.lock);

        resp.events_reported = uobj->events_reported;

        kfree(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
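/*
 * ib_uverbs_attach_mcast() and ib_uverbs_detach_mcast() attach or
 * detach the QP to/from the multicast group identified by the GID and
 * MLID given in the command.
 */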
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_attach_mcast cmd;
        struct ib_qp *qp;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (qp && qp->uobject->context == file->ucontext)
                ret = ib_attach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);

        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_detach_mcast cmd;
        struct ib_qp *qp;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (qp && qp->uobject->context == file->ucontext)
                ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);

        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
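/*
 * ib_uverbs_create_srq() creates a shared receive queue on the given
 * PD and returns its handle to userspace.
 */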
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_create_srq cmd;
        struct ib_uverbs_create_srq_resp resp;
        struct ib_udata udata;
        struct ib_uevent_object *uobj;
        struct ib_pd *pd;
        struct ib_srq *srq;
        struct ib_srq_init_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        down(&ib_uverbs_idr_mutex);

        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto err_up;
        }

        attr.event_handler = ib_uverbs_srq_event_handler;
        attr.srq_context = file;
        attr.attr.max_wr = cmd.max_wr;
        attr.attr.max_sge = cmd.max_sge;
        attr.attr.srq_limit = cmd.srq_limit;

        uobj->uobject.user_handle = cmd.user_handle;
        uobj->uobject.context = file->ucontext;
        uobj->events_reported = 0;
        INIT_LIST_HEAD(&uobj->event_list);

        srq = pd->device->create_srq(pd, &attr, &udata);
        if (IS_ERR(srq)) {
                ret = PTR_ERR(srq);
                goto err_up;
        }

        srq->device = pd->device;
        srq->pd = pd;
        srq->uobject = &uobj->uobject;
        srq->event_handler = attr.event_handler;
        srq->srq_context = attr.srq_context;
        atomic_inc(&pd->usecnt);
        atomic_set(&srq->usecnt, 0);

        memset(&resp, 0, sizeof resp);

retry:
        if (!idr_pre_get(&ib_uverbs_srq_idr, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_destroy;
        }

        ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);
        if (ret == -EAGAIN)
                goto retry;
        if (ret)
                goto err_destroy;

        resp.srq_handle = uobj->uobject.id;

        down(&file->mutex);
        list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
        up(&file->mutex);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_list;
        }

        up(&ib_uverbs_idr_mutex);

        return in_len;

err_list:
        down(&file->mutex);
        list_del(&uobj->uobject.list);
        up(&file->mutex);

err_destroy:
        ib_destroy_srq(srq);

err_up:
        up(&ib_uverbs_idr_mutex);

        kfree(uobj);
        return ret;
}
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_modify_srq cmd;
        struct ib_srq *srq;
        struct ib_srq_attr attr;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
        if (!srq || srq->uobject->context != file->ucontext) {
                ret = -EINVAL;
                goto out;
        }

        attr.max_wr = cmd.max_wr;
        attr.max_sge = cmd.max_sge;
        attr.srq_limit = cmd.srq_limit;

        ret = ib_modify_srq(srq, &attr, cmd.attr_mask);

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
                              const char __user *buf, int in_len,
                              int out_len)
{
        struct ib_uverbs_destroy_srq cmd;
        struct ib_uverbs_destroy_srq_resp resp;
        struct ib_srq *srq;
        struct ib_uevent_object *uobj;
        struct ib_uverbs_event *evt, *tmp;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        down(&ib_uverbs_idr_mutex);

        memset(&resp, 0, sizeof resp);

        srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
        if (!srq || srq->uobject->context != file->ucontext)
                goto out;

        uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);

        ret = ib_destroy_srq(srq);
        if (ret)
                goto out;

        idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);

        down(&file->mutex);
        list_del(&uobj->uobject.list);
        up(&file->mutex);

        spin_lock_irq(&file->async_file.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file.lock);

        resp.events_reported = uobj->events_reported;

        kfree(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        up(&ib_uverbs_idr_mutex);

        return ret ? ret : in_len;
}