/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
int ib_rate_to_mult(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return  1;
        case IB_RATE_5_GBPS:   return  2;
        case IB_RATE_10_GBPS:  return  4;
        case IB_RATE_20_GBPS:  return  8;
        case IB_RATE_30_GBPS:  return 12;
        case IB_RATE_40_GBPS:  return 16;
        case IB_RATE_60_GBPS:  return 24;
        case IB_RATE_80_GBPS:  return 32;
        case IB_RATE_120_GBPS: return 48;
        default:               return -1;
        }
}
EXPORT_SYMBOL(ib_rate_to_mult);

enum ib_rate mult_to_ib_rate(int mult)
{
        switch (mult) {
        case 1:  return IB_RATE_2_5_GBPS;
        case 2:  return IB_RATE_5_GBPS;
        case 4:  return IB_RATE_10_GBPS;
        case 8:  return IB_RATE_20_GBPS;
        case 12: return IB_RATE_30_GBPS;
        case 16: return IB_RATE_40_GBPS;
        case 24: return IB_RATE_60_GBPS;
        case 32: return IB_RATE_80_GBPS;
        case 48: return IB_RATE_120_GBPS;
        default: return IB_RATE_PORT_CURRENT;
        }
}
EXPORT_SYMBOL(mult_to_ib_rate);

int ib_rate_to_mbps(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return 2500;
        case IB_RATE_5_GBPS:   return 5000;
        case IB_RATE_10_GBPS:  return 10000;
        case IB_RATE_20_GBPS:  return 20000;
        case IB_RATE_30_GBPS:  return 30000;
        case IB_RATE_40_GBPS:  return 40000;
        case IB_RATE_60_GBPS:  return 60000;
        case IB_RATE_80_GBPS:  return 80000;
        case IB_RATE_120_GBPS: return 120000;
        case IB_RATE_14_GBPS:  return 14062;
        case IB_RATE_56_GBPS:  return 56250;
        case IB_RATE_112_GBPS: return 112500;
        case IB_RATE_168_GBPS: return 168750;
        case IB_RATE_25_GBPS:  return 25781;
        case IB_RATE_100_GBPS: return 103125;
        case IB_RATE_200_GBPS: return 206250;
        case IB_RATE_300_GBPS: return 309375;
        default:               return -1;
        }
}
EXPORT_SYMBOL(ib_rate_to_mbps);
enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
        switch (node_type) {
        case RDMA_NODE_IB_CA:
        case RDMA_NODE_IB_SWITCH:
        case RDMA_NODE_IB_ROUTER:
                return RDMA_TRANSPORT_IB;
        case RDMA_NODE_RNIC:
                return RDMA_TRANSPORT_IWARP;
        case RDMA_NODE_USNIC:
                return RDMA_TRANSPORT_USNIC;
        default:
                BUG();
                return 0;
        }
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
        if (device->get_link_layer)
                return device->get_link_layer(device, port_num);

        switch (rdma_node_get_transport(device->node_type)) {
        case RDMA_TRANSPORT_IB:
                return IB_LINK_LAYER_INFINIBAND;
        case RDMA_TRANSPORT_IWARP:
        case RDMA_TRANSPORT_USNIC:
                return IB_LINK_LAYER_ETHERNET;
        default:
                return IB_LINK_LAYER_UNSPECIFIED;
        }
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = device->alloc_pd(device, NULL, NULL);

        if (!IS_ERR(pd)) {
                pd->device  = device;
                pd->uobject = NULL;
                atomic_set(&pd->usecnt, 0);
        }

        return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
        if (atomic_read(&pd->usecnt))
                return -EBUSY;

        return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
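
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * kernel consumer pairs the two calls above, following the ERR_PTR
 * convention used throughout this API.  The "device" pointer here is
 * assumed to come from an ib_client add callback:
 *
 *      struct ib_pd *pd = ib_alloc_pd(device);
 *
 *      if (IS_ERR(pd))
 *              return PTR_ERR(pd);
 *      ...
 *      if (ib_dealloc_pd(pd))
 *              pr_warn("PD still in use, not freed\n");
 */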
/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr);

        if (!IS_ERR(ah)) {
                ah->device  = pd->device;
                ah->pd      = pd;
                ah->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
                       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
        u32 flow_class;
        u16 gid_index;
        int ret;

        memset(ah_attr, 0, sizeof *ah_attr);
        /*
         * Build a reply address: the sender's source LID/GID become our
         * destination, and our own GID index is looked up from the GRH's
         * destination GID.
         */
        ah_attr->dlid = wc->slid;
        ah_attr->sl = wc->sl;
        ah_attr->src_path_bits = wc->dlid_path_bits;
        ah_attr->port_num = port_num;

        if (wc->wc_flags & IB_WC_GRH) {
                ah_attr->ah_flags = IB_AH_GRH;
                ah_attr->grh.dgid = grh->sgid;

                ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
                                         &gid_index);
                if (ret)
                        return ret;

                ah_attr->grh.sgid_index = (u8) gid_index;
                flow_class = be32_to_cpu(grh->version_tclass_flow);
                ah_attr->grh.flow_label = flow_class & 0xFFFFF;
                ah_attr->grh.hop_limit = 0xFF;
                ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
        }
        return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num)
{
        struct ib_ah_attr ah_attr;
        int ret;

        ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
        if (ret)
                return ERR_PTR(ret);

        return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);
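
/*
 * Illustrative usage sketch (not part of the original file): a UD
 * responder replying to a received datagram might create a one-shot
 * address handle from the completion, assuming "wc" and "grh" were
 * taken from the receive completion and receive buffer:
 *
 *      struct ib_ah *ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
 *
 *      if (IS_ERR(ah))
 *              return PTR_ERR(ah);
 *      ... post the reply send using ah ...
 *      ib_destroy_ah(ah);
 */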
/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr)
{
        struct ib_srq *srq;

        if (!pd->device->create_srq)
                return ERR_PTR(-ENOSYS);

        srq = pd->device->create_srq(pd, srq_init_attr, NULL);

        if (!IS_ERR(srq)) {
                srq->device        = pd->device;
                srq->pd            = pd;
                srq->uobject       = NULL;
                srq->event_handler = srq_init_attr->event_handler;
                srq->srq_context   = srq_init_attr->srq_context;
                srq->srq_type      = srq_init_attr->srq_type;
                if (srq->srq_type == IB_SRQT_XRC) {
                        srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
                        srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
                        atomic_inc(&srq->ext.xrc.xrcd->usecnt);
                        atomic_inc(&srq->ext.xrc.cq->usecnt);
                }
                atomic_inc(&pd->usecnt);
                atomic_set(&srq->usecnt, 0);
        }

        return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask)
{
        return srq->device->modify_srq ?
                srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr)
{
        return srq->device->query_srq ?
                srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
        struct ib_pd *pd;
        enum ib_srq_type srq_type;
        struct ib_xrcd *uninitialized_var(xrcd);
        struct ib_cq *uninitialized_var(cq);
        int ret;

        if (atomic_read(&srq->usecnt))
                return -EBUSY;

        pd = srq->pd;
        srq_type = srq->srq_type;
        if (srq_type == IB_SRQT_XRC) {
                xrcd = srq->ext.xrc.xrcd;
                cq = srq->ext.xrc.cq;
        }

        ret = srq->device->destroy_srq(srq);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                if (srq_type == IB_SRQT_XRC) {
                        atomic_dec(&xrcd->usecnt);
                        atomic_dec(&cq->usecnt);
                }
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
/* Queue pairs */

/*
 * For a shared XRC target QP, fan the async event out to every ib_qp
 * that was opened on top of the real QP.
 */
static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
        struct ib_qp *qp = context;
        unsigned long flags;

        spin_lock_irqsave(&qp->device->event_handler_lock, flags);
        list_for_each_entry(event->element.qp, &qp->open_list, open_list)
                if (event->element.qp->event_handler)
                        event->element.qp->event_handler(event, event->element.qp->qp_context);
        spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
        mutex_lock(&xrcd->tgt_qp_mutex);
        list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
        mutex_unlock(&xrcd->tgt_qp_mutex);
}

static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
                                  void (*event_handler)(struct ib_event *, void *),
                                  void *qp_context)
{
        struct ib_qp *qp;
        unsigned long flags;

        qp = kzalloc(sizeof *qp, GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);

        qp->real_qp = real_qp;
        atomic_inc(&real_qp->usecnt);
        qp->device = real_qp->device;
        qp->event_handler = event_handler;
        qp->qp_context = qp_context;
        qp->qp_num = real_qp->qp_num;
        qp->qp_type = real_qp->qp_type;

        spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
        list_add(&qp->open_list, &real_qp->open_list);
        spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

        return qp;
}

struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
                         struct ib_qp_open_attr *qp_open_attr)
{
        struct ib_qp *qp, *real_qp;

        if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
                return ERR_PTR(-EINVAL);

        qp = ERR_PTR(-EINVAL);
        mutex_lock(&xrcd->tgt_qp_mutex);
        list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
                if (real_qp->qp_num == qp_open_attr->qp_num) {
                        qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
                                          qp_open_attr->qp_context);
                        break;
                }
        }
        mutex_unlock(&xrcd->tgt_qp_mutex);
        return qp;
}
EXPORT_SYMBOL(ib_open_qp);

struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *qp, *real_qp;
        struct ib_device *device;

        device = pd ? pd->device : qp_init_attr->xrcd->device;
        qp = device->create_qp(pd, qp_init_attr, NULL);

        if (!IS_ERR(qp)) {
                qp->device  = device;
                qp->real_qp = qp;
                qp->uobject = NULL;
                qp->qp_type = qp_init_attr->qp_type;

                atomic_set(&qp->usecnt, 0);
                if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
                        qp->event_handler = __ib_shared_qp_event_handler;
                        qp->qp_context = qp;
                        qp->pd = NULL;
                        qp->send_cq = qp->recv_cq = NULL;
                        qp->srq = NULL;
                        qp->xrcd = qp_init_attr->xrcd;
                        atomic_inc(&qp_init_attr->xrcd->usecnt);
                        INIT_LIST_HEAD(&qp->open_list);

                        real_qp = qp;
                        qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
                                          qp_init_attr->qp_context);
                        if (!IS_ERR(qp))
                                __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
                        else
                                real_qp->device->destroy_qp(real_qp);
                } else {
                        qp->event_handler = qp_init_attr->event_handler;
                        qp->qp_context = qp_init_attr->qp_context;
                        if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
                                qp->recv_cq = NULL;
                                qp->srq = NULL;
                        } else {
                                qp->recv_cq = qp_init_attr->recv_cq;
                                atomic_inc(&qp_init_attr->recv_cq->usecnt);
                                qp->srq = qp_init_attr->srq;
                                if (qp->srq)
                                        atomic_inc(&qp_init_attr->srq->usecnt);
                        }

                        qp->pd      = pd;
                        qp->send_cq = qp_init_attr->send_cq;
                        qp->xrcd    = NULL;

                        atomic_inc(&pd->usecnt);
                        atomic_inc(&qp_init_attr->send_cq->usecnt);
                }
        }

        return qp;
}
EXPORT_SYMBOL(ib_create_qp);
/*
 * Legal QP state transitions, indexed by [current state][next state].
 * For each valid transition, req_param lists the attribute mask bits
 * that must be set for each QP type, and opt_param those that may be.
 */
static const struct {
        int                     valid;
        enum ib_qp_attr_mask    req_param[IB_QPT_MAX];
        enum ib_qp_attr_mask    opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_QKEY),
                                [IB_QPT_RAW_PACKET] = IB_QP_PORT,
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                },
        },
        [IB_QPS_INIT]  = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR]   = { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
                                                IB_QP_PORT |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_RTR]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UC]  = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_RC]  = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_XRC_INI] = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_XRC_TGT] = (IB_QP_AV |
                                                IB_QP_PATH_MTU |
                                                IB_QP_DEST_QPN |
                                                IB_QP_RQ_PSN |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_MIN_RNR_TIMER),
                        },
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_RC]  = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_RTR]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR]   = { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = IB_QP_SQ_PSN,
                                [IB_QPT_UC]  = IB_QP_SQ_PSN,
                                [IB_QPT_RC]  = (IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT |
                                                IB_QP_RNR_RETRY |
                                                IB_QP_SQ_PSN |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT |
                                                IB_QP_RNR_RETRY |
                                                IB_QP_SQ_PSN |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
                                                IB_QP_SQ_PSN),
                                [IB_QPT_SMI] = IB_QP_SQ_PSN,
                                [IB_QPT_GSI] = IB_QP_SQ_PSN,
                        },
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_RTS]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR]   = { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_ALT_PATH |
                                                IB_QP_PATH_MIG_STATE |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
                                [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
                        }
                },
        },
        [IB_QPS_SQD]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR]   = { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_AV |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_PORT |
                                                IB_QP_AV |
                                                IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT |
                                                IB_QP_RNR_RETRY |
                                                IB_QP_MAX_QP_RD_ATOMIC |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_INI] = (IB_QP_PORT |
                                                IB_QP_AV |
                                                IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT |
                                                IB_QP_RNR_RETRY |
                                                IB_QP_MAX_QP_RD_ATOMIC |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_XRC_TGT] = (IB_QP_PORT |
                                                IB_QP_AV |
                                                IB_QP_TIMEOUT |
                                                IB_QP_MAX_DEST_RD_ATOMIC |
                                                IB_QP_ALT_PATH |
                                                IB_QP_ACCESS_FLAGS |
                                                IB_QP_PKEY_INDEX |
                                                IB_QP_MIN_RNR_TIMER |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_SQE]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR]   = { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_ERR] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR]   = { .valid = 1 }
        }
};
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
        enum ib_qp_attr_mask req_param, opt_param;

        if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
            next_state < 0 || next_state > IB_QPS_ERR)
                return 0;

        if (mask & IB_QP_CUR_STATE  &&
            cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
            cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
                return 0;

        if (!qp_state_table[cur_state][next_state].valid)
                return 0;

        req_param = qp_state_table[cur_state][next_state].req_param[type];
        opt_param = qp_state_table[cur_state][next_state].opt_param[type];

        if ((mask & req_param) != req_param)
                return 0;

        if (mask & ~(req_param | opt_param | IB_QP_STATE))
                return 0;

        return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
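
/*
 * Illustrative check (not part of the original file): a driver's
 * modify_qp method would typically validate the requested transition
 * against the table above before touching hardware, e.g. for an RC QP
 * moving INIT -> RTR ("attr_mask" is the caller-supplied mask):
 *
 *      if (!ib_modify_qp_is_ok(IB_QPS_INIT, IB_QPS_RTR, IB_QPT_RC,
 *                              attr_mask))
 *              return -EINVAL;
 */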
int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
{
        return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr)
{
        return qp->device->query_qp ?
                qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
        struct ib_qp *real_qp;
        unsigned long flags;

        real_qp = qp->real_qp;
        if (real_qp == qp)
                return -EINVAL;

        spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
        list_del(&qp->open_list);
        spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

        atomic_dec(&real_qp->usecnt);
        kfree(qp);

        return 0;
}
EXPORT_SYMBOL(ib_close_qp);

static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
        struct ib_xrcd *xrcd;
        struct ib_qp *real_qp;
        int ret;

        real_qp = qp->real_qp;
        xrcd = real_qp->xrcd;

        mutex_lock(&xrcd->tgt_qp_mutex);
        ib_close_qp(qp);
        if (atomic_read(&real_qp->usecnt) == 0)
                list_del(&real_qp->xrcd_list);
        else
                real_qp = NULL;
        mutex_unlock(&xrcd->tgt_qp_mutex);

        if (real_qp) {
                ret = ib_destroy_qp(real_qp);
                if (!ret)
                        atomic_dec(&xrcd->usecnt);
                else
                        __ib_insert_xrcd_qp(xrcd, real_qp);
        }

        return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        int ret;

        if (atomic_read(&qp->usecnt))
                return -EBUSY;

        if (qp->real_qp != qp)
                return __ib_destroy_shared_qp(qp);

        pd  = qp->pd;
        scq = qp->send_cq;
        rcq = qp->recv_cq;
        srq = qp->srq;

        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                if (pd)
                        atomic_dec(&pd->usecnt);
                if (scq)
                        atomic_dec(&scq->usecnt);
                if (rcq)
                        atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe, int comp_vector)
{
        struct ib_cq *cq;

        cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

        if (!IS_ERR(cq)) {
                cq->device        = device;
                cq->uobject       = NULL;
                cq->comp_handler  = comp_handler;
                cq->event_handler = event_handler;
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
        }

        return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        return cq->device->modify_cq ?
                cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
        if (atomic_read(&cq->usecnt))
                return -EBUSY;

        return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
        return cq->device->resize_cq ?
                cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);
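
/*
 * Illustrative usage sketch (not part of the original file): creating a
 * completion queue with a completion callback; "my_comp_handler",
 * "my_ctx", and the queue depth are hypothetical:
 *
 *      struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *                                      my_ctx, 256, 0);
 *
 *      if (IS_ERR(cq))
 *              return PTR_ERR(cq);
 *      ...
 *      ib_destroy_cq(cq);
 */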
/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *mr;
        int err;

        err = ib_check_mr_access(mr_access_flags);
        if (err)
                return ERR_PTR(err);

        mr = pd->device->get_dma_mr(pd, mr_access_flags);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start)
{
        struct ib_mr *mr;
        int err;

        err = ib_check_mr_access(mr_access_flags);
        if (err)
                return ERR_PTR(err);

        if (!pd->device->reg_phys_mr)
                return ERR_PTR(-ENOSYS);

        mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
                                     mr_access_flags, iova_start);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start)
{
        struct ib_pd *old_pd;
        int ret;

        ret = ib_check_mr_access(mr_access_flags);
        if (ret)
                return ret;

        if (!mr->device->rereg_phys_mr)
                return -ENOSYS;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        old_pd = mr->pd;

        ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
                                        phys_buf_array, num_phys_buf,
                                        mr_access_flags, iova_start);

        if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
                atomic_dec(&old_pd->usecnt);
                atomic_inc(&pd->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        return mr->device->query_mr ?
                mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        pd = mr->pd;
        ret = mr->device->dereg_mr(mr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
        struct ib_mr *mr;

        if (!pd->device->alloc_fast_reg_mr)
                return ERR_PTR(-ENOSYS);

        mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
                                                          int max_page_list_len)
{
        struct ib_fast_reg_page_list *page_list;

        if (!device->alloc_fast_reg_page_list)
                return ERR_PTR(-ENOSYS);

        page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

        if (!IS_ERR(page_list)) {
                page_list->device = device;
                page_list->max_page_list_len = max_page_list_len;
        }

        return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
        page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
        struct ib_mw *mw;

        if (!pd->device->alloc_mw)
                return ERR_PTR(-ENOSYS);

        mw = pd->device->alloc_mw(pd, type);
        if (!IS_ERR(mw)) {
                mw->device  = pd->device;
                mw->pd      = pd;
                mw->uobject = NULL;
                mw->type    = type;
                atomic_inc(&pd->usecnt);
        }

        return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd;
        int ret;

        pd = mw->pd;
        ret = mw->device->dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);
/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *fmr;

        if (!pd->device->alloc_fmr)
                return ERR_PTR(-ENOSYS);

        fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
        if (!IS_ERR(fmr)) {
                fmr->device = pd->device;
                fmr->pd     = pd;
                atomic_inc(&pd->usecnt);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;

        if (list_empty(fmr_list))
                return 0;

        fmr = list_entry(fmr_list->next, struct ib_fmr, list);
        return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
        struct ib_pd *pd;
        int ret;

        pd = fmr->pd;
        ret = fmr->device->dealloc_fmr(fmr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        int ret;

        if (!qp->device->attach_mcast)
                return -ENOSYS;
        /*
         * Only UD QPs may join, and the GID must be a multicast GID
         * (IB multicast GIDs carry an 0xFF prefix).
         */
        if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
                return -EINVAL;

        ret = qp->device->attach_mcast(qp, gid, lid);
        if (!ret)
                atomic_inc(&qp->usecnt);
        return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        int ret;

        if (!qp->device->detach_mcast)
                return -ENOSYS;
        if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
                return -EINVAL;

        ret = qp->device->detach_mcast(qp, gid, lid);
        if (!ret)
                atomic_dec(&qp->usecnt);
        return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
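
/*
 * Illustrative usage sketch (not part of the original file): attaching
 * a UD QP to a multicast group whose GID/LID were obtained elsewhere,
 * e.g. from an SA multicast join ("mgid" and "mlid" are assumptions):
 *
 *      ret = ib_attach_mcast(qp, &mgid, mlid);
 *      if (ret)
 *              return ret;
 *      ...
 *      ib_detach_mcast(qp, &mgid, mlid);
 */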
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
        struct ib_xrcd *xrcd;

        if (!device->alloc_xrcd)
                return ERR_PTR(-ENOSYS);

        xrcd = device->alloc_xrcd(device, NULL, NULL);
        if (!IS_ERR(xrcd)) {
                xrcd->device = device;
                xrcd->inode = NULL;
                atomic_set(&xrcd->usecnt, 0);
                mutex_init(&xrcd->tgt_qp_mutex);
                INIT_LIST_HEAD(&xrcd->tgt_qp_list);
        }

        return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
        struct ib_qp *qp;
        int ret;

        if (atomic_read(&xrcd->usecnt))
                return -EBUSY;

        while (!list_empty(&xrcd->tgt_qp_list)) {
                qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
                ret = ib_destroy_qp(qp);
                if (ret)
                        return ret;
        }

        return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
struct ib_flow *ib_create_flow(struct ib_qp *qp,
                               struct ib_flow_attr *flow_attr,
                               int domain)
{
        struct ib_flow *flow_id;

        if (!qp->device->create_flow)
                return ERR_PTR(-ENOSYS);

        flow_id = qp->device->create_flow(qp, flow_attr, domain);
        if (!IS_ERR(flow_id))
                atomic_inc(&qp->usecnt);
        return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
        int err;
        struct ib_qp *qp = flow_id->qp;

        err = qp->device->destroy_flow(flow_id);
        if (!err)
                atomic_dec(&qp->usecnt);
        return err;
}
EXPORT_SYMBOL(ib_destroy_flow);