  1. /*
  2. * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
  3. * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
  4. * Copyright (c) 2004 Intel Corporation. All rights reserved.
  5. * Copyright (c) 2004 Topspin Corporation. All rights reserved.
  6. * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
  7. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  8. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
  9. *
  10. * This software is available to you under a choice of one of two
  11. * licenses. You may choose to be licensed under the terms of the GNU
  12. * General Public License (GPL) Version 2, available from the file
  13. * COPYING in the main directory of this source tree, or the
  14. * OpenIB.org BSD license below:
  15. *
  16. * Redistribution and use in source and binary forms, with or
  17. * without modification, are permitted provided that the following
  18. * conditions are met:
  19. *
  20. * - Redistributions of source code must retain the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer.
  23. *
  24. * - Redistributions in binary form must reproduce the above
  25. * copyright notice, this list of conditions and the following
  26. * disclaimer in the documentation and/or other materials
  27. * provided with the distribution.
  28. *
  29. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  30. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  31. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  32. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  33. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  34. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  35. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  36. * SOFTWARE.
  37. *
  38. * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
  39. */
  40. #include <linux/errno.h>
  41. #include <linux/err.h>
  42. #include <linux/string.h>
  43. #include <rdma/ib_verbs.h>
  44. #include <rdma/ib_cache.h>
  45. /* Protection domains */
  46. struct ib_pd *ib_alloc_pd(struct ib_device *device)
  47. {
  48. struct ib_pd *pd;
  49. pd = device->alloc_pd(device, NULL, NULL);
  50. if (!IS_ERR(pd)) {
  51. pd->device = device;
  52. pd->uobject = NULL;
  53. atomic_set(&pd->usecnt, 0);
  54. }
  55. return pd;
  56. }
  57. EXPORT_SYMBOL(ib_alloc_pd);
  58. int ib_dealloc_pd(struct ib_pd *pd)
  59. {
  60. if (atomic_read(&pd->usecnt))
  61. return -EBUSY;
  62. return pd->device->dealloc_pd(pd);
  63. }
  64. EXPORT_SYMBOL(ib_dealloc_pd);
  65. /* Address handles */
  66. struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
  67. {
  68. struct ib_ah *ah;
  69. ah = pd->device->create_ah(pd, ah_attr);
  70. if (!IS_ERR(ah)) {
  71. ah->device = pd->device;
  72. ah->pd = pd;
  73. ah->uobject = NULL;
  74. atomic_inc(&pd->usecnt);
  75. }
  76. return ah;
  77. }
  78. EXPORT_SYMBOL(ib_create_ah);
/*
 * ib_create_ah_from_wc - create an AH suitable for replying to the
 * sender of a received packet.
 * @pd:       protection domain the new AH belongs to
 * @wc:       work completion describing the received packet
 * @grh:      received GRH (only dereferenced when IB_WC_GRH is set in @wc)
 * @port_num: port on which the packet arrived
 *
 * Returns the new AH, or an ERR_PTR on failure.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(&ah_attr, 0, sizeof ah_attr);
	/* Reverse the path: address the reply to the packet's source. */
	ah_attr.dlid = wc->slid;
	ah_attr.sl = wc->sl;
	ah_attr.src_path_bits = wc->dlid_path_bits;
	ah_attr.port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr.ah_flags = IB_AH_GRH;
		/* The reply's destination GID is the packet's source GID. */
		ah_attr.grh.dgid = grh->sgid;

		/* Look up our own GID (the packet's DGID) in the cached
		 * GID table to get the source GID index for the reply. */
		ret = ib_find_cached_gid(pd->device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ERR_PTR(ret);

		ah_attr.grh.sgid_index = (u8) gid_index;
		/* version_tclass_flow: 4-bit version, 8-bit traffic class,
		 * 20-bit flow label (big-endian on the wire). */
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr.grh.flow_label = flow_class & 0xFFFFF;
		ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF;
		ah_attr.grh.hop_limit = grh->hop_limit;
	}

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
  107. int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
  108. {
  109. return ah->device->modify_ah ?
  110. ah->device->modify_ah(ah, ah_attr) :
  111. -ENOSYS;
  112. }
  113. EXPORT_SYMBOL(ib_modify_ah);
  114. int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
  115. {
  116. return ah->device->query_ah ?
  117. ah->device->query_ah(ah, ah_attr) :
  118. -ENOSYS;
  119. }
  120. EXPORT_SYMBOL(ib_query_ah);
  121. int ib_destroy_ah(struct ib_ah *ah)
  122. {
  123. struct ib_pd *pd;
  124. int ret;
  125. pd = ah->pd;
  126. ret = ah->device->destroy_ah(ah);
  127. if (!ret)
  128. atomic_dec(&pd->usecnt);
  129. return ret;
  130. }
  131. EXPORT_SYMBOL(ib_destroy_ah);
  132. /* Shared receive queues */
  133. struct ib_srq *ib_create_srq(struct ib_pd *pd,
  134. struct ib_srq_init_attr *srq_init_attr)
  135. {
  136. struct ib_srq *srq;
  137. if (!pd->device->create_srq)
  138. return ERR_PTR(-ENOSYS);
  139. srq = pd->device->create_srq(pd, srq_init_attr, NULL);
  140. if (!IS_ERR(srq)) {
  141. srq->device = pd->device;
  142. srq->pd = pd;
  143. srq->uobject = NULL;
  144. srq->event_handler = srq_init_attr->event_handler;
  145. srq->srq_context = srq_init_attr->srq_context;
  146. atomic_inc(&pd->usecnt);
  147. atomic_set(&srq->usecnt, 0);
  148. }
  149. return srq;
  150. }
  151. EXPORT_SYMBOL(ib_create_srq);
  152. int ib_modify_srq(struct ib_srq *srq,
  153. struct ib_srq_attr *srq_attr,
  154. enum ib_srq_attr_mask srq_attr_mask)
  155. {
  156. return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
  157. }
  158. EXPORT_SYMBOL(ib_modify_srq);
  159. int ib_query_srq(struct ib_srq *srq,
  160. struct ib_srq_attr *srq_attr)
  161. {
  162. return srq->device->query_srq ?
  163. srq->device->query_srq(srq, srq_attr) : -ENOSYS;
  164. }
  165. EXPORT_SYMBOL(ib_query_srq);
  166. int ib_destroy_srq(struct ib_srq *srq)
  167. {
  168. struct ib_pd *pd;
  169. int ret;
  170. if (atomic_read(&srq->usecnt))
  171. return -EBUSY;
  172. pd = srq->pd;
  173. ret = srq->device->destroy_srq(srq);
  174. if (!ret)
  175. atomic_dec(&pd->usecnt);
  176. return ret;
  177. }
  178. EXPORT_SYMBOL(ib_destroy_srq);
  179. /* Queue pairs */
  180. struct ib_qp *ib_create_qp(struct ib_pd *pd,
  181. struct ib_qp_init_attr *qp_init_attr)
  182. {
  183. struct ib_qp *qp;
  184. qp = pd->device->create_qp(pd, qp_init_attr, NULL);
  185. if (!IS_ERR(qp)) {
  186. qp->device = pd->device;
  187. qp->pd = pd;
  188. qp->send_cq = qp_init_attr->send_cq;
  189. qp->recv_cq = qp_init_attr->recv_cq;
  190. qp->srq = qp_init_attr->srq;
  191. qp->uobject = NULL;
  192. qp->event_handler = qp_init_attr->event_handler;
  193. qp->qp_context = qp_init_attr->qp_context;
  194. qp->qp_type = qp_init_attr->qp_type;
  195. atomic_inc(&pd->usecnt);
  196. atomic_inc(&qp_init_attr->send_cq->usecnt);
  197. atomic_inc(&qp_init_attr->recv_cq->usecnt);
  198. if (qp_init_attr->srq)
  199. atomic_inc(&qp_init_attr->srq->usecnt);
  200. }
  201. return qp;
  202. }
  203. EXPORT_SYMBOL(ib_create_qp);
/*
 * qp_state_table[cur][next] encodes every legal QP state transition.
 * .valid is nonzero when the cur -> next transition is allowed at all;
 * .req_param[qp_type] is the attribute mask a caller MUST supply for
 * that transition, and .opt_param[qp_type] the attributes it MAY
 * additionally supply.  Entries not listed default to zero / invalid.
 * Consumed only by ib_modify_qp_is_ok() below.
 */
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETY + 1];
	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETY + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		/* INIT -> INIT: re-modify, everything is optional. */
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
						IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		/* INIT -> RTR: connected QPs need the full remote path. */
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV |
						IB_QP_PATH_MTU |
						IB_QP_DEST_QPN |
						IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		/* RTR -> RTS: send side comes up; RC also needs timeouts. */
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		/* RTS -> RTS: re-modify while fully operational. */
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		/* RTS -> SQD: drain the send queue. */
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		/* SQD -> SQD: while drained, most path attributes may change. */
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		/* SQE -> RTS: recover from a send-queue error (no RC here). */
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};
/*
 * ib_modify_qp_is_ok - validate a proposed QP state transition.
 * @cur_state:  current QP state
 * @next_state: requested QP state
 * @type:       QP transport type (RC/UC/UD/...)
 * @mask:       attribute mask the caller intends to pass to modify_qp
 *
 * Returns 1 when the transition is legal for @type and @mask contains
 * every required attribute and nothing outside required + optional
 * (+ IB_QP_STATE); returns 0 otherwise.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	/* Reject states outside the table's bounds. */
	if (cur_state < 0 || cur_state > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	/* IB_QP_CUR_STATE may only be asserted from states where the
	 * table lists it as an optional attribute. */
	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	/* All required attributes must be present... */
	if ((mask & req_param) != req_param)
		return 0;

	/* ...and nothing beyond required + optional + IB_QP_STATE. */
	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
  447. int ib_modify_qp(struct ib_qp *qp,
  448. struct ib_qp_attr *qp_attr,
  449. int qp_attr_mask)
  450. {
  451. return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
  452. }
  453. EXPORT_SYMBOL(ib_modify_qp);
  454. int ib_query_qp(struct ib_qp *qp,
  455. struct ib_qp_attr *qp_attr,
  456. int qp_attr_mask,
  457. struct ib_qp_init_attr *qp_init_attr)
  458. {
  459. return qp->device->query_qp ?
  460. qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
  461. -ENOSYS;
  462. }
  463. EXPORT_SYMBOL(ib_query_qp);
  464. int ib_destroy_qp(struct ib_qp *qp)
  465. {
  466. struct ib_pd *pd;
  467. struct ib_cq *scq, *rcq;
  468. struct ib_srq *srq;
  469. int ret;
  470. pd = qp->pd;
  471. scq = qp->send_cq;
  472. rcq = qp->recv_cq;
  473. srq = qp->srq;
  474. ret = qp->device->destroy_qp(qp);
  475. if (!ret) {
  476. atomic_dec(&pd->usecnt);
  477. atomic_dec(&scq->usecnt);
  478. atomic_dec(&rcq->usecnt);
  479. if (srq)
  480. atomic_dec(&srq->usecnt);
  481. }
  482. return ret;
  483. }
  484. EXPORT_SYMBOL(ib_destroy_qp);
  485. /* Completion queues */
  486. struct ib_cq *ib_create_cq(struct ib_device *device,
  487. ib_comp_handler comp_handler,
  488. void (*event_handler)(struct ib_event *, void *),
  489. void *cq_context, int cqe)
  490. {
  491. struct ib_cq *cq;
  492. cq = device->create_cq(device, cqe, NULL, NULL);
  493. if (!IS_ERR(cq)) {
  494. cq->device = device;
  495. cq->uobject = NULL;
  496. cq->comp_handler = comp_handler;
  497. cq->event_handler = event_handler;
  498. cq->cq_context = cq_context;
  499. atomic_set(&cq->usecnt, 0);
  500. }
  501. return cq;
  502. }
  503. EXPORT_SYMBOL(ib_create_cq);
  504. int ib_destroy_cq(struct ib_cq *cq)
  505. {
  506. if (atomic_read(&cq->usecnt))
  507. return -EBUSY;
  508. return cq->device->destroy_cq(cq);
  509. }
  510. EXPORT_SYMBOL(ib_destroy_cq);
  511. int ib_resize_cq(struct ib_cq *cq, int cqe)
  512. {
  513. return cq->device->resize_cq ?
  514. cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
  515. }
  516. EXPORT_SYMBOL(ib_resize_cq);
  517. /* Memory regions */
  518. struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
  519. {
  520. struct ib_mr *mr;
  521. mr = pd->device->get_dma_mr(pd, mr_access_flags);
  522. if (!IS_ERR(mr)) {
  523. mr->device = pd->device;
  524. mr->pd = pd;
  525. mr->uobject = NULL;
  526. atomic_inc(&pd->usecnt);
  527. atomic_set(&mr->usecnt, 0);
  528. }
  529. return mr;
  530. }
  531. EXPORT_SYMBOL(ib_get_dma_mr);
  532. struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
  533. struct ib_phys_buf *phys_buf_array,
  534. int num_phys_buf,
  535. int mr_access_flags,
  536. u64 *iova_start)
  537. {
  538. struct ib_mr *mr;
  539. mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
  540. mr_access_flags, iova_start);
  541. if (!IS_ERR(mr)) {
  542. mr->device = pd->device;
  543. mr->pd = pd;
  544. mr->uobject = NULL;
  545. atomic_inc(&pd->usecnt);
  546. atomic_set(&mr->usecnt, 0);
  547. }
  548. return mr;
  549. }
  550. EXPORT_SYMBOL(ib_reg_phys_mr);
  551. int ib_rereg_phys_mr(struct ib_mr *mr,
  552. int mr_rereg_mask,
  553. struct ib_pd *pd,
  554. struct ib_phys_buf *phys_buf_array,
  555. int num_phys_buf,
  556. int mr_access_flags,
  557. u64 *iova_start)
  558. {
  559. struct ib_pd *old_pd;
  560. int ret;
  561. if (!mr->device->rereg_phys_mr)
  562. return -ENOSYS;
  563. if (atomic_read(&mr->usecnt))
  564. return -EBUSY;
  565. old_pd = mr->pd;
  566. ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
  567. phys_buf_array, num_phys_buf,
  568. mr_access_flags, iova_start);
  569. if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
  570. atomic_dec(&old_pd->usecnt);
  571. atomic_inc(&pd->usecnt);
  572. }
  573. return ret;
  574. }
  575. EXPORT_SYMBOL(ib_rereg_phys_mr);
  576. int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
  577. {
  578. return mr->device->query_mr ?
  579. mr->device->query_mr(mr, mr_attr) : -ENOSYS;
  580. }
  581. EXPORT_SYMBOL(ib_query_mr);
  582. int ib_dereg_mr(struct ib_mr *mr)
  583. {
  584. struct ib_pd *pd;
  585. int ret;
  586. if (atomic_read(&mr->usecnt))
  587. return -EBUSY;
  588. pd = mr->pd;
  589. ret = mr->device->dereg_mr(mr);
  590. if (!ret)
  591. atomic_dec(&pd->usecnt);
  592. return ret;
  593. }
  594. EXPORT_SYMBOL(ib_dereg_mr);
  595. /* Memory windows */
  596. struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
  597. {
  598. struct ib_mw *mw;
  599. if (!pd->device->alloc_mw)
  600. return ERR_PTR(-ENOSYS);
  601. mw = pd->device->alloc_mw(pd);
  602. if (!IS_ERR(mw)) {
  603. mw->device = pd->device;
  604. mw->pd = pd;
  605. mw->uobject = NULL;
  606. atomic_inc(&pd->usecnt);
  607. }
  608. return mw;
  609. }
  610. EXPORT_SYMBOL(ib_alloc_mw);
  611. int ib_dealloc_mw(struct ib_mw *mw)
  612. {
  613. struct ib_pd *pd;
  614. int ret;
  615. pd = mw->pd;
  616. ret = mw->device->dealloc_mw(mw);
  617. if (!ret)
  618. atomic_dec(&pd->usecnt);
  619. return ret;
  620. }
  621. EXPORT_SYMBOL(ib_dealloc_mw);
  622. /* "Fast" memory regions */
  623. struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
  624. int mr_access_flags,
  625. struct ib_fmr_attr *fmr_attr)
  626. {
  627. struct ib_fmr *fmr;
  628. if (!pd->device->alloc_fmr)
  629. return ERR_PTR(-ENOSYS);
  630. fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
  631. if (!IS_ERR(fmr)) {
  632. fmr->device = pd->device;
  633. fmr->pd = pd;
  634. atomic_inc(&pd->usecnt);
  635. }
  636. return fmr;
  637. }
  638. EXPORT_SYMBOL(ib_alloc_fmr);
  639. int ib_unmap_fmr(struct list_head *fmr_list)
  640. {
  641. struct ib_fmr *fmr;
  642. if (list_empty(fmr_list))
  643. return 0;
  644. fmr = list_entry(fmr_list->next, struct ib_fmr, list);
  645. return fmr->device->unmap_fmr(fmr_list);
  646. }
  647. EXPORT_SYMBOL(ib_unmap_fmr);
  648. int ib_dealloc_fmr(struct ib_fmr *fmr)
  649. {
  650. struct ib_pd *pd;
  651. int ret;
  652. pd = fmr->pd;
  653. ret = fmr->device->dealloc_fmr(fmr);
  654. if (!ret)
  655. atomic_dec(&pd->usecnt);
  656. return ret;
  657. }
  658. EXPORT_SYMBOL(ib_dealloc_fmr);
  659. /* Multicast groups */
  660. int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
  661. {
  662. if (!qp->device->attach_mcast)
  663. return -ENOSYS;
  664. if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
  665. return -EINVAL;
  666. return qp->device->attach_mcast(qp, gid, lid);
  667. }
  668. EXPORT_SYMBOL(ib_attach_mcast);
  669. int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
  670. {
  671. if (!qp->device->detach_mcast)
  672. return -ENOSYS;
  673. if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
  674. return -EINVAL;
  675. return qp->device->detach_mcast(qp, gid, lid);
  676. }
  677. EXPORT_SYMBOL(ib_detach_mcast);