main.c

/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include "user.h"
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "1.0"
#define DRIVER_RELDATE "June 2013"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

static int prof_sel = 2;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

static struct mlx5_profile profile[] = {
	[0] = {
		.mask		= 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= 17,
		.mr_cache[0]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[1]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[2]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[3]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[4]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[5]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[6]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[7]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[8]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[9]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[10]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[11]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[12]	= {
			.size	= 64,
			.limit	= 32
		},
		.mr_cache[13]	= {
			.size	= 32,
			.limit	= 16
		},
		.mr_cache[14]	= {
			.size	= 16,
			.limit	= 8
		},
		.mr_cache[15]	= {
			.size	= 8,
			.limit	= 4
		},
	},
};
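
/*
 * NOTE: prof_sel picks one of the profiles above at module load time;
 * init_one() falls back to profile 0 if the value is out of range.  An
 * illustrative load command (module name assumed to be mlx5_ib):
 *
 *	modprobe mlx5_ib prof_sel=1	// 4K QPs, no MR cache tuning
 */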

int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
{
	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
	struct mlx5_eq *eq, *n;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}

static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
{
	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
	struct mlx5_eq *eq, *n;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&dev->eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(&dev->mdev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 eq->name,
					 &dev->mdev.priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}

		mlx5_ib_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &dev->eqs_list);
		spin_unlock(&table->lock);
	}

	dev->num_comp_vectors = ncomp_vec;
	return 0;

clean:
	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
			mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
	return err;
}

static void free_comp_eqs(struct mlx5_ib_dev *dev)
{
	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
			mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}
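
/*
 * NOTE: in both teardown loops above, the table lock is dropped around
 * mlx5_destroy_unmap_eq() because destroying an EQ issues a firmware
 * command that may sleep; each entry is unlinked from eqs_list under the
 * lock first, so the walk can safely resume once the lock is retaken.
 */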

static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	int max_rq_sg;
	int max_sq_sg;
	u64 flags;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof(*props));

	props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) |
		(fw_rev_min(&dev->mdev) << 16) |
		fw_rev_sub(&dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	flags = dev->mdev.caps.flags;
	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (flags & MLX5_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if (flags & MLX5_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;

	props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = be16_to_cpup((__be16 *)(out_mad->data + 30));
	props->hw_ver = be32_to_cpup((__be32 *)(out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->mdev.caps.min_page_sz;
	props->max_qp = 1 << dev->mdev.caps.log_max_qp;
	props->max_qp_wr = dev->mdev.caps.max_wqes;
	max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
	max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_cq = 1 << dev->mdev.caps.log_max_cq;
	props->max_cqe = dev->mdev.caps.max_cqes - 1;
	props->max_mr = 1 << dev->mdev.caps.log_max_mkey;
	props->max_pd = 1 << dev->mdev.caps.log_max_pd;
	props->max_qp_rd_atom = dev->mdev.caps.max_ra_req_qp;
	props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = 1 << dev->mdev.caps.log_max_srq;
	props->max_srq_wr = dev->mdev.caps.max_srq_wqes - 1;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len = (unsigned int)-1;
	props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
	props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = IB_ATOMIC_HCA;
	props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
	props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	if (port < 1 || port > dev->mdev.caps.num_ports) {
		mlx5_ib_warn(dev, "invalid port number %d\n", port);
		return -EINVAL;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof(*props));

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

	props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len = out_mad->data[50];
	props->max_msg_sz = 1 << to_mdev(ibdev)->mdev.caps.log_max_msg;
	props->pkey_tbl_len = to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len;
	props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		}
	}

	/* If the reported active speed is QDR, check whether it is FDR-10 */
	if (props->active_speed == 4) {
		if (dev->mdev.caps.ext_port_cap[port - 1] &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
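
/*
 * NOTE: the GID is assembled from two MADs: bytes 8..15 of the PortInfo
 * attribute carry the 64-bit subnet prefix (gid->raw[0..7]), while the
 * GuidInfo attribute returns blocks of eight GUIDs, so "index / 8" selects
 * the block and "index % 8" the GUID within it (gid->raw[8..15]).
 */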

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

struct mlx5_reg_node_desc {
	u8 desc[64];
};

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * Pass the node desc to FW so it can generate a 144 trap.  If the
	 * command fails, return the error without updating the cached copy.
	 */
	memcpy(&in, props->node_desc, 64);
	err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, 64);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;

	mutex_lock(&dev->cap_mask_mutex);

	err = mlx5_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(&dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req req;
	struct mlx5_ib_alloc_ucontext_resp resp;
	struct mlx5_ib_ucontext *context;
	struct mlx5_uuar_info *uuari;
	struct mlx5_uar *uars;
	int num_uars;
	int uuarn;
	int err;
	int i;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	err = ib_copy_from_udata(&req, udata, sizeof(req));
	if (err)
		return ERR_PTR(err);

	if (req.total_num_uuars > MLX5_MAX_UUARS)
		return ERR_PTR(-ENOMEM);

	if (req.total_num_uuars == 0)
		return ERR_PTR(-EINVAL);

	req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE);
	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
		return ERR_PTR(-EINVAL);

	num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE;
	resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp;
	resp.bf_reg_size = dev->mdev.caps.bf_reg_size;
	resp.cache_line_size = L1_CACHE_BYTES;
	resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz;
	resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz;
	resp.max_send_wqebb = dev->mdev.caps.max_wqes;
	resp.max_recv_wr = dev->mdev.caps.max_wqes;
	resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	uuari = &context->uuari;
	mutex_init(&uuari->lock);
	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
	if (!uars) {
		err = -ENOMEM;
		goto out_ctx;
	}

	uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars),
				sizeof(*uuari->bitmap),
				GFP_KERNEL);
	if (!uuari->bitmap) {
		err = -ENOMEM;
		goto out_uar_ctx;
	}
	/*
	 * clear all fast path uuars
	 */
	for (i = 0; i < req.total_num_uuars; i++) {
		uuarn = i & 3;
		if (uuarn == 2 || uuarn == 3)
			set_bit(i, uuari->bitmap);
	}

	uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL);
	if (!uuari->count) {
		err = -ENOMEM;
		goto out_bitmap;
	}

	for (i = 0; i < num_uars; i++) {
		err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index);
		if (err)
			goto out_count;
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_uuars = req.total_num_uuars;
	resp.num_ports = dev->mdev.caps.num_ports;
	err = ib_copy_to_udata(udata, &resp,
			       sizeof(resp) - sizeof(resp.reserved));
	if (err)
		goto out_uars;

	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
	uuari->uars = uars;
	uuari->num_uars = num_uars;

	return &context->ibucontext;

out_uars:
	for (i--; i >= 0; i--)
		mlx5_cmd_free_uar(&dev->mdev, uars[i].index);
out_count:
	kfree(uuari->count);

out_bitmap:
	kfree(uuari->bitmap);

out_uar_ctx:
	kfree(uars);

out_ctx:
	kfree(context);
	return ERR_PTR(err);
}
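
/*
 * NOTE: the uuar bitmap above is grouped in fours: positions 2 and 3 of
 * every group are pre-marked as taken (the "fast path" uuars reserved for
 * low-latency use), e.g. for total_num_uuars = 8:
 *
 *	index:  0 1 2 3 4 5 6 7
 *	bitmap: 0 0 1 1 0 0 1 1
 */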

static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	int i;

	for (i = 0; i < uuari->num_uars; i++) {
		if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index))
			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
	}

	kfree(uuari->count);
	kfree(uuari->bitmap);
	kfree(uuari->uars);
	kfree(context);

	return 0;
}

static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
	return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index;
}

static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}
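
/*
 * NOTE: the mmap offset doubles as a multiplexer: the bits above
 * MLX5_IB_MMAP_CMD_SHIFT carry the command, the bits below it carry the
 * argument (here a UAR index).  A hypothetical user-space encoder:
 *
 *	off_t off = (((off_t)cmd << MLX5_IB_MMAP_CMD_SHIFT) | idx)
 *			* sysconf(_SC_PAGESIZE);
 *
 * since vm_pgoff is the byte offset passed to mmap(2) shifted down by
 * PAGE_SHIFT.
 */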

static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	unsigned long command;
	unsigned long idx;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_REGULAR_PAGE:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		idx = get_index(vma->vm_pgoff);
		/* Validate the index before using it to look up uars[] */
		if (idx >= uuari->num_uars)
			return -EINVAL;

		pfn = uar_index2pfn(dev, uuari->uars[idx].index);
		mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
			    (unsigned long long)pfn);

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
			    vma->vm_start,
			    (unsigned long long)pfn << PAGE_SHIFT);
		break;

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	default:
		return -EINVAL;
	}

	return 0;
}
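
/*
 * Illustrative user-space counterpart (assumed usage, not part of this
 * file): mapping the first UAR page through an open verbs context fd,
 * where offset 0 encodes MLX5_IB_MMAP_REGULAR_PAGE with index 0:
 *
 *	void *uar = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_WRITE,
 *			 MAP_SHARED, ctx_fd, 0);
 *
 * The kernel side above makes the mapping write-combining, which is what
 * allows doorbell writes to the UAR page to be posted efficiently.
 */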

static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
{
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_core_mr mr;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	seg = &in->seg;
	seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	seg->start_addr = 0;

	err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in));
	if (err) {
		mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
		goto err_in;
	}

	kfree(in);
	*key = mr.key;

	return 0;

err_in:
	kfree(in);
	return err;
}
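
/*
 * NOTE: the mkey built above uses physical addressing
 * (MLX5_ACCESS_MODE_PA) together with MLX5_MKEY_LEN64, i.e. its length
 * spans the whole 64-bit address space; it becomes the pa_lkey of PDs
 * allocated without a user context (see mlx5_ib_alloc_pd() below).
 */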

static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
{
	struct mlx5_core_mr mr;
	int err;

	memset(&mr, 0, sizeof(mr));
	mr.key = key;
	err = mlx5_core_destroy_mkey(&dev->mdev, &mr);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
}

static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx5_ib_alloc_pd_resp resp;
	struct mlx5_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	} else {
		err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
		if (err) {
			mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(err);
		}
	}

	return &pd->ibpd;
}

static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	if (!pd->uobject)
		free_pa_mkey(mdev, mpd->pa_lkey);

	mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn);
	kfree(mpd);

	return 0;
}

static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int init_node_data(struct mlx5_ib_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
			     char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages);
}

static ssize_t show_reg_pages(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "MT%d\n", dev->mdev.pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev),
		       fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev));
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%x\n", dev->mdev.rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
		       dev->mdev.board_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);

static struct device_attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_fw_pages,
	&dev_attr_reg_pages,
};
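
/*
 * NOTE: these attributes are registered against the ib_dev's device node
 * in init_one(), so they surface in sysfs under the IB device, e.g.
 * (illustrative path, device name depends on probe order):
 *
 *	/sys/class/infiniband/mlx5_0/fw_ver
 */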

static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
			  void *data)
{
	struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev);
	struct ib_event ibev;
	u8 port = 0;

	switch (event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX5_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		port = *(u8 *)data;
		break;

	case MLX5_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		port = *(u8 *)data;
		break;

	case MLX5_DEV_EVENT_PORT_INITIALIZED:
		/* not used by ULPs */
		return;

	case MLX5_DEV_EVENT_LID_CHANGE:
		ibev.event = IB_EVENT_LID_CHANGE;
		port = *(u8 *)data;
		break;

	case MLX5_DEV_EVENT_PKEY_CHANGE:
		ibev.event = IB_EVENT_PKEY_CHANGE;
		port = *(u8 *)data;
		break;

	case MLX5_DEV_EVENT_GUID_CHANGE:
		ibev.event = IB_EVENT_GID_CHANGE;
		port = *(u8 *)data;
		break;

	case MLX5_DEV_EVENT_CLIENT_REREG:
		ibev.event = IB_EVENT_CLIENT_REREGISTER;
		port = *(u8 *)data;
		break;
	}

	ibev.device	      = &ibdev->ib_dev;
	ibev.element.port_num = port;

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);
}

static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	int port;

	for (port = 1; port <= dev->mdev.caps.num_ports; port++)
		mlx5_query_ext_port_caps(dev, port);
}

static int get_port_caps(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	/* Start from -ENOMEM so allocation failures don't return success */
	int err = -ENOMEM;
	int port;

	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;

	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
	if (!dprops)
		goto out;

	err = mlx5_ib_query_device(&dev->ib_dev, dprops);
	if (err) {
		mlx5_ib_warn(dev, "query_device failed %d\n", err);
		goto out;
	}

	for (port = 1; port <= dev->mdev.caps.num_ports; port++) {
		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
		if (err) {
			mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
			break;
		}
		dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
		dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
			    dprops->max_pkeys, pprops->gid_tbl_len);
	}

out:
	kfree(pprops);
	kfree(dprops);

	return err;
}

static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mr_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	mlx5_ib_destroy_qp(dev->umrc.qp);
	ib_destroy_cq(dev->umrc.cq);
	ib_dereg_mr(dev->umrc.mr);
	ib_dealloc_pd(dev->umrc.pd);
}

enum {
	MAX_UMR_WR = 128,
};

static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	struct ib_mr *mr;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mr)) {
		mlx5_ib_dbg(dev, "Couldn't create DMA MR for sync UMR QP\n");
		ret = PTR_ERR(mr);
		goto error_1;
	}

	cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, 128,
			  0);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device = &dev->ib_dev;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = MLX5_IB_QPT_REG_UMR;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.mr = mr;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);

error_3:
	ib_destroy_cq(cq);

error_2:
	ib_dereg_mr(mr);

error_1:
	ib_dealloc_pd(pd);

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}
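
/*
 * NOTE: create_umr_res() walks the UMR QP through the usual IB state
 * machine by hand (RESET -> INIT -> RTR -> RTS, one modify_qp call per
 * transition) because this driver-internal QP never goes through
 * connection management; the semaphore initialized to MAX_UMR_WR then
 * acts as a counting gate bounding outstanding UMR work requests at 128.
 */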

static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device  = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, 1, 0, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device        = &dev->ib_dev;
	devr->c0->uobject       = NULL;
	devr->c0->comp_handler  = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context    = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device	= &dev->ib_dev;
	devr->s0->pd		= devr->p0;
	devr->s0->uobject       = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context   = NULL;
	devr->s0->srq_type      = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd	= devr->x0;
	devr->s0->ext.xrc.cq	= devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	return 0;

error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}

static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);
}

static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *dev;
	int err;
	int i;

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return -ENOMEM;

	mdev = &dev->mdev;
	mdev->event = mlx5_ib_event;
	if (prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("selected profile out of range, selecting default\n");
		prof_sel = 0;
	}
	mdev->profile = &profile[prof_sel];
	err = mlx5_dev_init(mdev, pdev);
	if (err)
		goto err_free;

	err = get_port_caps(dev);
	if (err)
		goto err_cleanup;

	get_ext_port_caps(dev);

	err = alloc_comp_eqs(dev);
	if (err)
		goto err_cleanup;

	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);

	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner		= THIS_MODULE;
	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey	= mdev->caps.reserved_lkey;
	dev->num_ports			= mdev->caps.num_ports;
	dev->ib_dev.phys_port_cnt	= dev->num_ports;
	dev->ib_dev.num_comp_vectors	= dev->num_comp_vectors;
	dev->ib_dev.dma_device		= &mdev->pdev->dev;
	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	dev->ib_dev.query_device	= mlx5_ib_query_device;
	dev->ib_dev.query_port		= mlx5_ib_query_port;
	dev->ib_dev.query_gid		= mlx5_ib_query_gid;
	dev->ib_dev.query_pkey		= mlx5_ib_query_pkey;
	dev->ib_dev.modify_device	= mlx5_ib_modify_device;
	dev->ib_dev.modify_port		= mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext	= mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext	= mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap		= mlx5_ib_mmap;
	dev->ib_dev.alloc_pd		= mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd		= mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah		= mlx5_ib_create_ah;
	dev->ib_dev.query_ah		= mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah		= mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq		= mlx5_ib_create_srq;
	dev->ib_dev.modify_srq		= mlx5_ib_modify_srq;
	dev->ib_dev.query_srq		= mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq		= mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv	= mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp		= mlx5_ib_create_qp;
	dev->ib_dev.modify_qp		= mlx5_ib_modify_qp;
	dev->ib_dev.query_qp		= mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp		= mlx5_ib_destroy_qp;
	dev->ib_dev.post_send		= mlx5_ib_post_send;
	dev->ib_dev.post_recv		= mlx5_ib_post_recv;
	dev->ib_dev.create_cq		= mlx5_ib_create_cq;
	dev->ib_dev.modify_cq		= mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq		= mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq		= mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq		= mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq	= mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr		= mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr		= mlx5_ib_reg_user_mr;
	dev->ib_dev.dereg_mr		= mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast	= mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast	= mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad		= mlx5_ib_process_mad;
	dev->ib_dev.alloc_fast_reg_mr	= mlx5_ib_alloc_fast_reg_mr;
	dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
	dev->ib_dev.free_fast_reg_page_list  = mlx5_ib_free_fast_reg_page_list;

	if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	err = init_node_data(dev);
	if (err)
		goto err_eqs;

	mutex_init(&dev->cap_mask_mutex);
	spin_lock_init(&dev->mr_lock);

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_eqs;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_rsrc;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	dev->ib_active = true;

	return 0;

err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_eqs:
	free_comp_eqs(dev);

err_cleanup:
	mlx5_dev_cleanup(mdev);

err_free:
	ib_dealloc_device((struct ib_device *)dev);

	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev);

	destroy_umrc_res(dev);
	ib_unregister_device(&dev->ib_dev);
	destroy_dev_resources(&dev->devr);
	free_comp_eqs(dev);
	mlx5_dev_cleanup(&dev->mdev);
	ib_dealloc_device(&dev->ib_dev);
}

static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table);

static struct pci_driver mlx5_ib_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mlx5_ib_pci_table,
	.probe		= init_one,
	.remove		= remove_one
};

static int __init mlx5_ib_init(void)
{
	return pci_register_driver(&mlx5_ib_driver);
}

static void __exit mlx5_ib_cleanup(void)
{
	pci_unregister_driver(&mlx5_ib_driver);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);