/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	"mlx4_ib"
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"April 4, 2008"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

struct update_gid_work {
	struct work_struct	work;
	union ib_gid		gids[128];
	struct mlx4_ib_dev     *dev;
	int			port;
};

static struct workqueue_struct *wq;
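
/*
 * The single-threaded workqueue above (created in mlx4_ib_init()) runs the
 * update_gids_task() work items queued by update_ipv6_gids(), so GID-table
 * rewrites for a port are serialized and applied in submission order.
 */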

static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
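
/*
 * init_query_mad() fills in the common header for the SubnGet() queries
 * used throughout this file.  The recurring pattern in the query_* helpers
 * below is, roughly (a sketch only; see the real callers for the exact
 * attribute IDs and modifiers):
 *
 *	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 *	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
 *	init_query_mad(in_mad);
 *	in_mad->attr_id  = IB_SMP_ATTR_...;
 *	in_mad->attr_mod = cpu_to_be32(block or port selector);
 *	err = mlx4_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
 *
 * followed by parsing big-endian fields out of out_mad->data.
 */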

static union ib_gid zgid;

static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN		|
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
	props->max_qp_wr	   = dev->dev->caps.max_wqes;
	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_cq		   = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = IB_ATOMIC_HCA;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask & (1 << (port_num - 1)) ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
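
/*
 * A set bit in caps.port_mask marks the port as InfiniBand; a clear bit
 * means the port runs Ethernet (IBoE).  mlx4_ib_query_port() uses this to
 * dispatch to ib_link_query_port() or eth_link_query_port() below.
 */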

static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props,
			      struct ib_smp *in_mad,
			      struct ib_smp *out_mad)
{
	int ext_active_speed;
	int err;

	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		}
	}

	/* If the reported active speed is QDR, check whether it is really FDR-10 */
	if (props->active_speed == 4) {
		if (to_mdev(ibdev)->dev->caps.ext_port_cap[port] &
			MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				return err;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

	return 0;
}
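
/*
 * PortInfo:PortPhysicalState encoding per the IBTA spec: 5 is "LinkUp",
 * 3 is "Disabled".  For an IBoE port there is no SMA to report this, so
 * the helper below synthesizes it from the logical port state.
 */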

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props,
			       struct ib_smp *out_mad)
{
	struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;

	props->active_width	= IB_WIDTH_1X;
	props->active_speed	= 4;
	props->port_cap_flags	= IB_PORT_CM_SUP;
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= 1;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->max_mtu		= IB_MTU_4096;
	props->subnet_timeout	= 0;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= 0;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;

	spin_lock(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (!ndev)
		goto out;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state	  = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
				IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);

out:
	spin_unlock(&iboe->lock);
	return 0;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, in_mad, out_mad) :
		eth_link_query_port(ibdev, port, props, out_mad);

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			       union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid);
	else
		return iboe_query_gid(ibdev, port, index, gid);
}
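
/*
 * A SubnGet(P_KeyTable) returns one 32-entry block of the P_Key table,
 * selected by the AttributeModifier: hence "index / 32" picks the block
 * in the query below and "index % 32" picks the entry inside the
 * returned block.
 */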

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	spin_lock(&to_mdev(ibdev)->sm_lock);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock(&to_mdev(ibdev)->sm_lock);

	/*
	 * If possible, pass the node description to firmware as well, so
	 * that it can generate a trap 144 (node description changed).  If
	 * the command fails, just ignore it.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memset(mailbox->buf, 0, 256);
	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			 u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}
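
/*
 * The two branches above reflect the two SET_PORT mailbox layouts: on
 * firmware that still uses the old port commands (MLX4_FLAG_OLD_PORT_CMDS),
 * the reset_qkey_viols bit sits in bit 6 of byte 0 and the capability mask
 * in the third dword; newer firmware moved them to byte 3 and the second
 * dword, respectively.
 */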

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_SET_PORT(to_mdev(ibdev), port,
			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
			    cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	resp.qp_tab_size      = dev->dev->caps.num_qps;
	resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	err = ib_copy_to_udata(udata, &resp, sizeof resp);
	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}
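
/*
 * mlx4_ib_mmap() below exposes exactly two single-page mappings to user
 * space, selected by the page offset passed to mmap(): pgoff 0 maps the
 * context's UAR doorbell page (non-cached), and pgoff 1 maps the matching
 * BlueFlame page (write-combining) when bf_reg_size is nonzero.  A user
 * library would obtain them roughly like this (a sketch, not the literal
 * libmlx4 code):
 *
 *	uar = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, cmd_fd, 0);
 *	bf  = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, cmd_fd,
 *		   page_size);
 *
 * where an offset of page_size corresponds to vm_pgoff == 1.
 */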

static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}
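
/*
 * mlx4_ib_add_mc() returns 1 when the multicast MAC derived from the GID
 * was actually added to the port's net_device; add_gid_entry() above
 * records that in ge->added so that mlx4_ib_mcg_detach() knows whether it
 * must call dev_mc_del() later.
 */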
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	u8 mac[6];
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock(&mdev->iboe.lock);

	if (ndev) {
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		rtnl_lock();
		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
		ret = 1;
		rtnl_unlock();
		dev_put(ndev);
	}

	return ret;
}

static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
				    !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    MLX4_PROT_IB_IPV6);
	if (err)
		return err;

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
	return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}

static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u8 mac[6];
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;

	err = mlx4_multicast_detach(mdev->dev,
				    &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
	if (err)
		return err;

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock(&mdev->iboe.lock);
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		if (ndev) {
			rtnl_lock();
			dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
			rtnl_unlock();
			dev_put(ndev);
		}
		list_del(&ge->list);
		kfree(ge);
	} else
		printk(KERN_WARNING "could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}

static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
{
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}
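
/*
 * Worked example for mlx4_addrconf_ifid_eui48(): for a port MAC of
 * 00:11:22:33:44:55 with no VLAN (vlan_id >= 0x1000), the bytes ff:fe are
 * stuffed into the middle and the universal/local bit is flipped, yielding
 * the modified EUI-64 interface ID 02:11:22:ff:fe:33:44:55.  With VLAN id
 * 0x00a (< 0x1000), the middle two bytes carry the VLAN id instead:
 * 02:11:22:00:0a:33:44:55.
 */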

static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev	*dev = gw->dev->dev;
	struct ib_event event;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
		return;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
	if (err)
		printk(KERN_WARNING "set port command failed\n");
	else {
		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
		event.device = &gw->dev->ib_dev;
		event.element.port_num = gw->port;
		event.event = IB_EVENT_GID_CHANGE;
		ib_dispatch_event(&event);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	kfree(gw);
}

static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
{
	struct net_device *ndev = dev->iboe.netdevs[port - 1];
	struct update_gid_work *work;
	struct net_device *tmp;
	int i;
	u8 *hits;
	int ret;
	union ib_gid gid;
	int free;
	int found;
	int need_update = 0;
	u16 vid;

	work = kzalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	hits = kzalloc(128, GFP_ATOMIC);
	if (!hits) {
		ret = -ENOMEM;
		goto out;
	}

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, tmp) {
		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
			vid = rdma_vlan_dev_vlan_id(tmp);
			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
			found = 0;
			free = -1;
			for (i = 0; i < 128; ++i) {
				if (free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
					free = i;
				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
					hits[i] = 1;
					found = 1;
					break;
				}
			}

			if (!found) {
				if (tmp == ndev &&
				    (memcmp(&dev->iboe.gid_table[port - 1][0],
					    &gid, sizeof gid) ||
				     !memcmp(&dev->iboe.gid_table[port - 1][0],
					     &zgid, sizeof gid))) {
					dev->iboe.gid_table[port - 1][0] = gid;
					++need_update;
					hits[0] = 1;
				} else if (free >= 0) {
					dev->iboe.gid_table[port - 1][free] = gid;
					hits[free] = 1;
					++need_update;
				}
			}
		}
	}
	rcu_read_unlock();

	for (i = 0; i < 128; ++i)
		if (!hits[i]) {
			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
				++need_update;
			dev->iboe.gid_table[port - 1][i] = zgid;
		}

	if (need_update) {
		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
		INIT_WORK(&work->work, update_gids_task);
		work->port = port;
		work->dev = dev;
		queue_work(wq, &work->work);
	} else
		kfree(work);

	kfree(hits);
	return 0;

out:
	kfree(work);
	return ret;
}
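
/*
 * update_ipv6_gids() rebuilds the software GID table for a port from the
 * link-local (fe80::/64) identifiers of the port's net_device and its VLAN
 * children: slot 0 is reserved for the real device's GID, other entries
 * land in the first free slot, and stale entries are zeroed.  When anything
 * changed, the new table is pushed to firmware asynchronously via
 * update_gids_task() on the wq workqueue.
 */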

static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		update_ipv6_gids(dev, port, 0);
		break;

	case NETDEV_DOWN:
		update_ipv6_gids(dev, port, 1);
		dev->iboe.netdevs[port - 1] = NULL;
	}
}

static void netdev_added(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 0);
}

static void netdev_removed(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 1);
}

static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	struct net_device *dev = ptr;
	struct mlx4_ib_dev *ibdev;
	struct net_device *oldnd;
	struct mlx4_ib_iboe *iboe;
	int port;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	iboe = &ibdev->iboe;

	spin_lock(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		oldnd = iboe->netdevs[port - 1];
		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
		if (oldnd != iboe->netdevs[port - 1]) {
			if (iboe->netdevs[port - 1])
				netdev_added(ibdev, port);
			else
				netdev_removed(ibdev, port);
		}
	}

	if (dev == iboe->netdevs[0] ||
	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
		handle_en_event(ibdev, 1, event);
	else if (dev == iboe->netdevs[1]
		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
		handle_en_event(ibdev, 2, event);

	spin_unlock(&iboe->lock);

	return NOTIFY_DONE;
}
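
/*
 * The notifier above only tracks devices in init_net; on every event it
 * refreshes the netdev-to-port mapping and then forwards the event for
 * whichever IBoE port (1 or 2) the triggering net_device, or its VLAN
 * parent, is bound to.
 */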

static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i;
	int err;
	struct mlx4_ib_iboe *iboe;

	printk_once(KERN_INFO "%s", mlx4_ib_version);

	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner		= THIS_MODULE;
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt	= ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device	= &dev->pdev->dev;

	ibdev->ib_dev.uverbs_abi_ver	= MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq		= mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list  = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;

	ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
	ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
	ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
	ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	for (i = 0; i < ibdev->num_ports; ++i) {
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
			if (err)
				ibdev->counters[i] = -1;
		} else
			ibdev->counters[i] = -1;
	}

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_counter;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err)
			goto err_reg;
	}

	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[i]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	return ibdev;

err_notif:
	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
		printk(KERN_WARNING "failure unregistering notifier\n");
	flush_workqueue(wq);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_counter:
	for (; i; --i)
		mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}

static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			printk(KERN_WARNING "failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}

static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, int port)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);

	if (port > ibdev->num_ports)
		return;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = port;

	ib_dispatch_event(&ibev);
}

static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6
};
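
/*
 * Registering this mlx4_interface with mlx4_core makes mlx4_ib_add() run
 * for every ConnectX device already probed (and any hot-plugged later);
 * mlx4_core hands asynchronous hardware events to mlx4_ib_event(), which
 * translates them into ib_event notifications for verbs consumers.
 */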

static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err) {
		destroy_workqueue(wq);
		return err;
	}

	return 0;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);