/* drivers/infiniband/core/verbs.c */
  1. /*
  2. * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
  3. * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
  4. * Copyright (c) 2004 Intel Corporation. All rights reserved.
  5. * Copyright (c) 2004 Topspin Corporation. All rights reserved.
  6. * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
  7. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  8. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
  9. *
  10. * This software is available to you under a choice of one of two
  11. * licenses. You may choose to be licensed under the terms of the GNU
  12. * General Public License (GPL) Version 2, available from the file
  13. * COPYING in the main directory of this source tree, or the
  14. * OpenIB.org BSD license below:
  15. *
  16. * Redistribution and use in source and binary forms, with or
  17. * without modification, are permitted provided that the following
  18. * conditions are met:
  19. *
  20. * - Redistributions of source code must retain the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer.
  23. *
  24. * - Redistributions in binary form must reproduce the above
  25. * copyright notice, this list of conditions and the following
  26. * disclaimer in the documentation and/or other materials
  27. * provided with the distribution.
  28. *
  29. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  30. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  31. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  32. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  33. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  34. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  35. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  36. * SOFTWARE.
  37. *
  38. * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
  39. */
  40. #include <linux/errno.h>
  41. #include <linux/err.h>
  42. #include <linux/string.h>
  43. #include <rdma/ib_verbs.h>
  44. #include <rdma/ib_cache.h>
  45. int ib_rate_to_mult(enum ib_rate rate)
  46. {
  47. switch (rate) {
  48. case IB_RATE_2_5_GBPS: return 1;
  49. case IB_RATE_5_GBPS: return 2;
  50. case IB_RATE_10_GBPS: return 4;
  51. case IB_RATE_20_GBPS: return 8;
  52. case IB_RATE_30_GBPS: return 12;
  53. case IB_RATE_40_GBPS: return 16;
  54. case IB_RATE_60_GBPS: return 24;
  55. case IB_RATE_80_GBPS: return 32;
  56. case IB_RATE_120_GBPS: return 48;
  57. default: return -1;
  58. }
  59. }
  60. EXPORT_SYMBOL(ib_rate_to_mult);
  61. enum ib_rate mult_to_ib_rate(int mult)
  62. {
  63. switch (mult) {
  64. case 1: return IB_RATE_2_5_GBPS;
  65. case 2: return IB_RATE_5_GBPS;
  66. case 4: return IB_RATE_10_GBPS;
  67. case 8: return IB_RATE_20_GBPS;
  68. case 12: return IB_RATE_30_GBPS;
  69. case 16: return IB_RATE_40_GBPS;
  70. case 24: return IB_RATE_60_GBPS;
  71. case 32: return IB_RATE_80_GBPS;
  72. case 48: return IB_RATE_120_GBPS;
  73. default: return IB_RATE_PORT_CURRENT;
  74. }
  75. }
  76. EXPORT_SYMBOL(mult_to_ib_rate);
  77. /* Protection domains */
  78. struct ib_pd *ib_alloc_pd(struct ib_device *device)
  79. {
  80. struct ib_pd *pd;
  81. pd = device->alloc_pd(device, NULL, NULL);
  82. if (!IS_ERR(pd)) {
  83. pd->device = device;
  84. pd->uobject = NULL;
  85. atomic_set(&pd->usecnt, 0);
  86. }
  87. return pd;
  88. }
  89. EXPORT_SYMBOL(ib_alloc_pd);
  90. int ib_dealloc_pd(struct ib_pd *pd)
  91. {
  92. if (atomic_read(&pd->usecnt))
  93. return -EBUSY;
  94. return pd->device->dealloc_pd(pd);
  95. }
  96. EXPORT_SYMBOL(ib_dealloc_pd);
  97. /* Address handles */
  98. struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
  99. {
  100. struct ib_ah *ah;
  101. ah = pd->device->create_ah(pd, ah_attr);
  102. if (!IS_ERR(ah)) {
  103. ah->device = pd->device;
  104. ah->pd = pd;
  105. ah->uobject = NULL;
  106. atomic_inc(&pd->usecnt);
  107. }
  108. return ah;
  109. }
  110. EXPORT_SYMBOL(ib_create_ah);
/*
 * ib_init_ah_from_wc - Fill in @ah_attr so that a reply can be sent to
 * the originator of a received work completion.
 * @device:   device on which the completion arrived
 * @port_num: port on which the completion arrived
 * @wc:       the received work completion
 * @grh:      received GRH; only dereferenced when wc->wc_flags has
 *            IB_WC_GRH set
 * @ah_attr:  output; zeroed and then initialized
 *
 * Returns 0 on success, or the error from ib_find_cached_gid() if our
 * own GID (the packet's destination GID) cannot be found in the cache.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
u32 flow_class;
u16 gid_index;
int ret;
memset(ah_attr, 0, sizeof *ah_attr);
/* The reply's destination is the sender: swap source and destination. */
ah_attr->dlid = wc->slid;
ah_attr->sl = wc->sl;
ah_attr->src_path_bits = wc->dlid_path_bits;
ah_attr->port_num = port_num;
if (wc->wc_flags & IB_WC_GRH) {
ah_attr->ah_flags = IB_AH_GRH;
/* Reply GRH destination GID is the received source GID. */
ah_attr->grh.dgid = grh->sgid;
/*
 * Find the local index of the GID the packet was sent to.
 * port_num is passed by address and may be written by the
 * lookup; ah_attr->port_num keeps the caller's value that was
 * stored above.
 */
ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
&gid_index);
if (ret)
return ret;
ah_attr->grh.sgid_index = (u8) gid_index;
/* version_tclass_flow: 4-bit version, 8-bit tclass, 20-bit flow label. */
flow_class = be32_to_cpu(grh->version_tclass_flow);
ah_attr->grh.flow_label = flow_class & 0xFFFFF;
ah_attr->grh.hop_limit = grh->hop_limit;
ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
}
return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);
  138. struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
  139. struct ib_grh *grh, u8 port_num)
  140. {
  141. struct ib_ah_attr ah_attr;
  142. int ret;
  143. ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
  144. if (ret)
  145. return ERR_PTR(ret);
  146. return ib_create_ah(pd, &ah_attr);
  147. }
  148. EXPORT_SYMBOL(ib_create_ah_from_wc);
  149. int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
  150. {
  151. return ah->device->modify_ah ?
  152. ah->device->modify_ah(ah, ah_attr) :
  153. -ENOSYS;
  154. }
  155. EXPORT_SYMBOL(ib_modify_ah);
  156. int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
  157. {
  158. return ah->device->query_ah ?
  159. ah->device->query_ah(ah, ah_attr) :
  160. -ENOSYS;
  161. }
  162. EXPORT_SYMBOL(ib_query_ah);
  163. int ib_destroy_ah(struct ib_ah *ah)
  164. {
  165. struct ib_pd *pd;
  166. int ret;
  167. pd = ah->pd;
  168. ret = ah->device->destroy_ah(ah);
  169. if (!ret)
  170. atomic_dec(&pd->usecnt);
  171. return ret;
  172. }
  173. EXPORT_SYMBOL(ib_destroy_ah);
  174. /* Shared receive queues */
  175. struct ib_srq *ib_create_srq(struct ib_pd *pd,
  176. struct ib_srq_init_attr *srq_init_attr)
  177. {
  178. struct ib_srq *srq;
  179. if (!pd->device->create_srq)
  180. return ERR_PTR(-ENOSYS);
  181. srq = pd->device->create_srq(pd, srq_init_attr, NULL);
  182. if (!IS_ERR(srq)) {
  183. srq->device = pd->device;
  184. srq->pd = pd;
  185. srq->uobject = NULL;
  186. srq->event_handler = srq_init_attr->event_handler;
  187. srq->srq_context = srq_init_attr->srq_context;
  188. atomic_inc(&pd->usecnt);
  189. atomic_set(&srq->usecnt, 0);
  190. }
  191. return srq;
  192. }
  193. EXPORT_SYMBOL(ib_create_srq);
  194. int ib_modify_srq(struct ib_srq *srq,
  195. struct ib_srq_attr *srq_attr,
  196. enum ib_srq_attr_mask srq_attr_mask)
  197. {
  198. return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
  199. }
  200. EXPORT_SYMBOL(ib_modify_srq);
  201. int ib_query_srq(struct ib_srq *srq,
  202. struct ib_srq_attr *srq_attr)
  203. {
  204. return srq->device->query_srq ?
  205. srq->device->query_srq(srq, srq_attr) : -ENOSYS;
  206. }
  207. EXPORT_SYMBOL(ib_query_srq);
  208. int ib_destroy_srq(struct ib_srq *srq)
  209. {
  210. struct ib_pd *pd;
  211. int ret;
  212. if (atomic_read(&srq->usecnt))
  213. return -EBUSY;
  214. pd = srq->pd;
  215. ret = srq->device->destroy_srq(srq);
  216. if (!ret)
  217. atomic_dec(&pd->usecnt);
  218. return ret;
  219. }
  220. EXPORT_SYMBOL(ib_destroy_srq);
  221. /* Queue pairs */
  222. struct ib_qp *ib_create_qp(struct ib_pd *pd,
  223. struct ib_qp_init_attr *qp_init_attr)
  224. {
  225. struct ib_qp *qp;
  226. qp = pd->device->create_qp(pd, qp_init_attr, NULL);
  227. if (!IS_ERR(qp)) {
  228. qp->device = pd->device;
  229. qp->pd = pd;
  230. qp->send_cq = qp_init_attr->send_cq;
  231. qp->recv_cq = qp_init_attr->recv_cq;
  232. qp->srq = qp_init_attr->srq;
  233. qp->uobject = NULL;
  234. qp->event_handler = qp_init_attr->event_handler;
  235. qp->qp_context = qp_init_attr->qp_context;
  236. qp->qp_type = qp_init_attr->qp_type;
  237. atomic_inc(&pd->usecnt);
  238. atomic_inc(&qp_init_attr->send_cq->usecnt);
  239. atomic_inc(&qp_init_attr->recv_cq->usecnt);
  240. if (qp_init_attr->srq)
  241. atomic_inc(&qp_init_attr->srq->usecnt);
  242. }
  243. return qp;
  244. }
  245. EXPORT_SYMBOL(ib_create_qp);
/*
 * qp_state_table - the legal QP state transitions and their attributes.
 *
 * Indexed as qp_state_table[current_state][next_state].  For each pair:
 *   .valid      - nonzero if the transition is allowed at all
 *   .req_param  - per QP type, the attribute mask bits that MUST be
 *                 present in a modify-QP for this transition
 *   .opt_param  - per QP type, the attribute mask bits that MAY
 *                 additionally be present
 * Consumed by ib_modify_qp_is_ok() below.
 */
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETY + 1];
	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETY + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	/* Transitions out of RESET */
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		[IB_QPS_INIT] = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
					       IB_QP_PORT |
					       IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_PKEY_INDEX |
					       IB_QP_PORT |
					       IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC] = (IB_QP_PKEY_INDEX |
					       IB_QP_PORT |
					       IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
	},
	/* Transitions out of INIT */
	[IB_QPS_INIT] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		/* INIT -> INIT may re-specify the INIT attributes */
		[IB_QPS_INIT] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
					       IB_QP_PORT |
					       IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_PKEY_INDEX |
					       IB_QP_PORT |
					       IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC] = (IB_QP_PKEY_INDEX |
					       IB_QP_PORT |
					       IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		},
		/* INIT -> RTR: connected QPs must supply path/remote info */
		[IB_QPS_RTR] = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC] = (IB_QP_AV |
					       IB_QP_PATH_MTU |
					       IB_QP_DEST_QPN |
					       IB_QP_RQ_PSN),
				[IB_QPT_RC] = (IB_QP_AV |
					       IB_QP_PATH_MTU |
					       IB_QP_DEST_QPN |
					       IB_QP_RQ_PSN |
					       IB_QP_MAX_DEST_RD_ATOMIC |
					       IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
					       IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_ALT_PATH |
					       IB_QP_ACCESS_FLAGS |
					       IB_QP_PKEY_INDEX),
				[IB_QPT_RC] = (IB_QP_ALT_PATH |
					       IB_QP_ACCESS_FLAGS |
					       IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	/* Transitions out of RTR */
	[IB_QPS_RTR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		/* RTR -> RTS: send queue becomes usable */
		[IB_QPS_RTS] = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD] = IB_QP_SQ_PSN,
				[IB_QPT_UC] = IB_QP_SQ_PSN,
				[IB_QPT_RC] = (IB_QP_TIMEOUT |
					       IB_QP_RETRY_CNT |
					       IB_QP_RNR_RETRY |
					       IB_QP_SQ_PSN |
					       IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_CUR_STATE |
					       IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_CUR_STATE |
					       IB_QP_ALT_PATH |
					       IB_QP_ACCESS_FLAGS |
					       IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC] = (IB_QP_CUR_STATE |
					       IB_QP_ALT_PATH |
					       IB_QP_ACCESS_FLAGS |
					       IB_QP_MIN_RNR_TIMER |
					       IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	/* Transitions out of RTS */
	[IB_QPS_RTS] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		/* RTS -> RTS: only optional tweaks allowed */
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_CUR_STATE |
					       IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_CUR_STATE |
					       IB_QP_ACCESS_FLAGS |
					       IB_QP_ALT_PATH |
					       IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC] = (IB_QP_CUR_STATE |
					       IB_QP_ACCESS_FLAGS |
					       IB_QP_ALT_PATH |
					       IB_QP_PATH_MIG_STATE |
					       IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		/* RTS -> SQD: drain the send queue */
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	/* Transitions out of SQD */
	[IB_QPS_SQD] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		/* SQD -> RTS: resume sending, optionally with changes */
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_CUR_STATE |
					       IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_CUR_STATE |
					       IB_QP_ALT_PATH |
					       IB_QP_ACCESS_FLAGS |
					       IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC] = (IB_QP_CUR_STATE |
					       IB_QP_ALT_PATH |
					       IB_QP_ACCESS_FLAGS |
					       IB_QP_MIN_RNR_TIMER |
					       IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		},
		/* SQD -> SQD: while drained, most attributes may change */
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
					       IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_AV |
					       IB_QP_ALT_PATH |
					       IB_QP_ACCESS_FLAGS |
					       IB_QP_PKEY_INDEX |
					       IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC] = (IB_QP_PORT |
					       IB_QP_AV |
					       IB_QP_TIMEOUT |
					       IB_QP_RETRY_CNT |
					       IB_QP_RNR_RETRY |
					       IB_QP_MAX_QP_RD_ATOMIC |
					       IB_QP_MAX_DEST_RD_ATOMIC |
					       IB_QP_ALT_PATH |
					       IB_QP_ACCESS_FLAGS |
					       IB_QP_PKEY_INDEX |
					       IB_QP_MIN_RNR_TIMER |
					       IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
						IB_QP_QKEY),
			}
		}
	},
	/* Transitions out of SQE (send queue error) */
	[IB_QPS_SQE] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 },
		/* SQE -> RTS: not possible for RC QPs (no entry for them) */
		[IB_QPS_RTS] = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD] = (IB_QP_CUR_STATE |
					       IB_QP_QKEY),
				[IB_QPT_UC] = (IB_QP_CUR_STATE |
					       IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE |
						IB_QP_QKEY),
			}
		}
	},
	/* Out of ERR only RESET (or staying in ERR) is legal */
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] = { .valid = 1 }
	}
};
  467. int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
  468. enum ib_qp_type type, enum ib_qp_attr_mask mask)
  469. {
  470. enum ib_qp_attr_mask req_param, opt_param;
  471. if (cur_state < 0 || cur_state > IB_QPS_ERR ||
  472. next_state < 0 || next_state > IB_QPS_ERR)
  473. return 0;
  474. if (mask & IB_QP_CUR_STATE &&
  475. cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
  476. cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
  477. return 0;
  478. if (!qp_state_table[cur_state][next_state].valid)
  479. return 0;
  480. req_param = qp_state_table[cur_state][next_state].req_param[type];
  481. opt_param = qp_state_table[cur_state][next_state].opt_param[type];
  482. if ((mask & req_param) != req_param)
  483. return 0;
  484. if (mask & ~(req_param | opt_param | IB_QP_STATE))
  485. return 0;
  486. return 1;
  487. }
  488. EXPORT_SYMBOL(ib_modify_qp_is_ok);
  489. int ib_modify_qp(struct ib_qp *qp,
  490. struct ib_qp_attr *qp_attr,
  491. int qp_attr_mask)
  492. {
  493. return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
  494. }
  495. EXPORT_SYMBOL(ib_modify_qp);
  496. int ib_query_qp(struct ib_qp *qp,
  497. struct ib_qp_attr *qp_attr,
  498. int qp_attr_mask,
  499. struct ib_qp_init_attr *qp_init_attr)
  500. {
  501. return qp->device->query_qp ?
  502. qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
  503. -ENOSYS;
  504. }
  505. EXPORT_SYMBOL(ib_query_qp);
  506. int ib_destroy_qp(struct ib_qp *qp)
  507. {
  508. struct ib_pd *pd;
  509. struct ib_cq *scq, *rcq;
  510. struct ib_srq *srq;
  511. int ret;
  512. pd = qp->pd;
  513. scq = qp->send_cq;
  514. rcq = qp->recv_cq;
  515. srq = qp->srq;
  516. ret = qp->device->destroy_qp(qp);
  517. if (!ret) {
  518. atomic_dec(&pd->usecnt);
  519. atomic_dec(&scq->usecnt);
  520. atomic_dec(&rcq->usecnt);
  521. if (srq)
  522. atomic_dec(&srq->usecnt);
  523. }
  524. return ret;
  525. }
  526. EXPORT_SYMBOL(ib_destroy_qp);
  527. /* Completion queues */
  528. struct ib_cq *ib_create_cq(struct ib_device *device,
  529. ib_comp_handler comp_handler,
  530. void (*event_handler)(struct ib_event *, void *),
  531. void *cq_context, int cqe)
  532. {
  533. struct ib_cq *cq;
  534. cq = device->create_cq(device, cqe, NULL, NULL);
  535. if (!IS_ERR(cq)) {
  536. cq->device = device;
  537. cq->uobject = NULL;
  538. cq->comp_handler = comp_handler;
  539. cq->event_handler = event_handler;
  540. cq->cq_context = cq_context;
  541. atomic_set(&cq->usecnt, 0);
  542. }
  543. return cq;
  544. }
  545. EXPORT_SYMBOL(ib_create_cq);
  546. int ib_destroy_cq(struct ib_cq *cq)
  547. {
  548. if (atomic_read(&cq->usecnt))
  549. return -EBUSY;
  550. return cq->device->destroy_cq(cq);
  551. }
  552. EXPORT_SYMBOL(ib_destroy_cq);
  553. int ib_resize_cq(struct ib_cq *cq, int cqe)
  554. {
  555. return cq->device->resize_cq ?
  556. cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
  557. }
  558. EXPORT_SYMBOL(ib_resize_cq);
  559. /* Memory regions */
  560. struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
  561. {
  562. struct ib_mr *mr;
  563. mr = pd->device->get_dma_mr(pd, mr_access_flags);
  564. if (!IS_ERR(mr)) {
  565. mr->device = pd->device;
  566. mr->pd = pd;
  567. mr->uobject = NULL;
  568. atomic_inc(&pd->usecnt);
  569. atomic_set(&mr->usecnt, 0);
  570. }
  571. return mr;
  572. }
  573. EXPORT_SYMBOL(ib_get_dma_mr);
  574. struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
  575. struct ib_phys_buf *phys_buf_array,
  576. int num_phys_buf,
  577. int mr_access_flags,
  578. u64 *iova_start)
  579. {
  580. struct ib_mr *mr;
  581. mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
  582. mr_access_flags, iova_start);
  583. if (!IS_ERR(mr)) {
  584. mr->device = pd->device;
  585. mr->pd = pd;
  586. mr->uobject = NULL;
  587. atomic_inc(&pd->usecnt);
  588. atomic_set(&mr->usecnt, 0);
  589. }
  590. return mr;
  591. }
  592. EXPORT_SYMBOL(ib_reg_phys_mr);
/*
 * ib_rereg_phys_mr - Re-register a physical memory region, changing the
 * translation, access flags and/or protection domain as selected by
 * @mr_rereg_mask.
 *
 * Returns -ENOSYS when the driver lacks the verb, -EBUSY while the
 * MR's usecnt is nonzero (NOTE(review): usecnt presumably counts bound
 * memory windows — confirm against ib_bind_mw users), otherwise the
 * driver's result.  On a successful PD change, the usecnt reference is
 * moved from the old PD to the new one.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
int mr_rereg_mask,
struct ib_pd *pd,
struct ib_phys_buf *phys_buf_array,
int num_phys_buf,
int mr_access_flags,
u64 *iova_start)
{
struct ib_pd *old_pd;
int ret;
if (!mr->device->rereg_phys_mr)
return -ENOSYS;
if (atomic_read(&mr->usecnt))
return -EBUSY;
/* Remember the current PD before the driver may retarget the MR. */
old_pd = mr->pd;
ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
phys_buf_array, num_phys_buf,
mr_access_flags, iova_start);
/* Only on success AND a requested PD change does the reference move. */
if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
atomic_dec(&old_pd->usecnt);
atomic_inc(&pd->usecnt);
}
return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);
  618. int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
  619. {
  620. return mr->device->query_mr ?
  621. mr->device->query_mr(mr, mr_attr) : -ENOSYS;
  622. }
  623. EXPORT_SYMBOL(ib_query_mr);
  624. int ib_dereg_mr(struct ib_mr *mr)
  625. {
  626. struct ib_pd *pd;
  627. int ret;
  628. if (atomic_read(&mr->usecnt))
  629. return -EBUSY;
  630. pd = mr->pd;
  631. ret = mr->device->dereg_mr(mr);
  632. if (!ret)
  633. atomic_dec(&pd->usecnt);
  634. return ret;
  635. }
  636. EXPORT_SYMBOL(ib_dereg_mr);
  637. /* Memory windows */
  638. struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
  639. {
  640. struct ib_mw *mw;
  641. if (!pd->device->alloc_mw)
  642. return ERR_PTR(-ENOSYS);
  643. mw = pd->device->alloc_mw(pd);
  644. if (!IS_ERR(mw)) {
  645. mw->device = pd->device;
  646. mw->pd = pd;
  647. mw->uobject = NULL;
  648. atomic_inc(&pd->usecnt);
  649. }
  650. return mw;
  651. }
  652. EXPORT_SYMBOL(ib_alloc_mw);
  653. int ib_dealloc_mw(struct ib_mw *mw)
  654. {
  655. struct ib_pd *pd;
  656. int ret;
  657. pd = mw->pd;
  658. ret = mw->device->dealloc_mw(mw);
  659. if (!ret)
  660. atomic_dec(&pd->usecnt);
  661. return ret;
  662. }
  663. EXPORT_SYMBOL(ib_dealloc_mw);
  664. /* "Fast" memory regions */
  665. struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
  666. int mr_access_flags,
  667. struct ib_fmr_attr *fmr_attr)
  668. {
  669. struct ib_fmr *fmr;
  670. if (!pd->device->alloc_fmr)
  671. return ERR_PTR(-ENOSYS);
  672. fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
  673. if (!IS_ERR(fmr)) {
  674. fmr->device = pd->device;
  675. fmr->pd = pd;
  676. atomic_inc(&pd->usecnt);
  677. }
  678. return fmr;
  679. }
  680. EXPORT_SYMBOL(ib_alloc_fmr);
  681. int ib_unmap_fmr(struct list_head *fmr_list)
  682. {
  683. struct ib_fmr *fmr;
  684. if (list_empty(fmr_list))
  685. return 0;
  686. fmr = list_entry(fmr_list->next, struct ib_fmr, list);
  687. return fmr->device->unmap_fmr(fmr_list);
  688. }
  689. EXPORT_SYMBOL(ib_unmap_fmr);
  690. int ib_dealloc_fmr(struct ib_fmr *fmr)
  691. {
  692. struct ib_pd *pd;
  693. int ret;
  694. pd = fmr->pd;
  695. ret = fmr->device->dealloc_fmr(fmr);
  696. if (!ret)
  697. atomic_dec(&pd->usecnt);
  698. return ret;
  699. }
  700. EXPORT_SYMBOL(ib_dealloc_fmr);
  701. /* Multicast groups */
  702. int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
  703. {
  704. if (!qp->device->attach_mcast)
  705. return -ENOSYS;
  706. if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
  707. return -EINVAL;
  708. return qp->device->attach_mcast(qp, gid, lid);
  709. }
  710. EXPORT_SYMBOL(ib_attach_mcast);
  711. int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
  712. {
  713. if (!qp->device->detach_mcast)
  714. return -ENOSYS;
  715. if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
  716. return -EINVAL;
  717. return qp->device->detach_mcast(qp, gid, lid);
  718. }
  719. EXPORT_SYMBOL(ib_detach_mcast);