main.c

/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"April 4, 2008"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

struct update_gid_work {
	struct work_struct work;
	union ib_gid gids[128];
	struct mlx4_ib_dev *dev;
	int port;
};

static struct workqueue_struct *wq;
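
/*
 * Prepare an SMP (subnet management packet) for a SubnGet() query:
 * LID-routed class, base/class version 1, method GET.  Callers fill in
 * attr_id/attr_mod before handing the MAD to mlx4_MAD_IFC().
 */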
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version = 1;
	mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method = IB_MGMT_METHOD_GET;
}

static union ib_gid zgid;
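
/*
 * query_device verb: fetch the NodeInfo attribute from firmware for the
 * vendor/part/revision fields and the system image GUID, then fill the
 * remaining limits from the cached capability struct (dev->dev->caps).
 */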
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_sge = min(dev->dev->caps.max_sq_sg,
			     dev->dev->caps.max_rq_sg);
	props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If the reported active speed is QDR, check whether it is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
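
/*
 * Map a logical port state to a PortPhysicalState value as used in the
 * IB PortInfo attribute: 5 (LinkUp) when active, 3 (Disabled) otherwise.
 */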
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ? 5 : 3;
}

static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
		IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;

	spin_lock(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
		IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);

out_unlock:
	spin_unlock(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	int err;

	memset(props, 0, sizeof *props);

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props) :
		eth_link_query_port(ibdev, port, props);

	return err;
}

static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			       union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
			  union ib_gid *gid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	*gid = dev->iboe.gid_table[port - 1][index];

	return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		return __mlx4_ib_query_gid(ibdev, port, index, gid);
	else
		return iboe_query_gid(ibdev, port, index, gid);
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, 64);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap. If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memset(mailbox->buf, 0, 256);
	memcpy(mailbox->buf, props->node_desc, 64);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}
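
/*
 * Push a new capability mask (and optionally reset the QKey violation
 * counter) to firmware with SET_PORT.  Firmware that still uses the old
 * port commands (MLX4_FLAG_OLD_PORT_CMDS) expects a different mailbox
 * layout, hence the two encodings below.
 */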
static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			 u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

	err = mlx4_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_SET_PORT(to_mdev(ibdev), port,
			    !!(mask & IB_PORT_RESET_QKEY_CNTR),
			    cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	resp.qp_tab_size = dev->dev->caps.num_qps;
	resp.bf_reg_size = dev->dev->caps.bf_reg_size;
	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err) {
		kfree(context);
		return ERR_PTR(err);
	}

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	err = ib_copy_to_udata(udata, &resp, sizeof resp);
	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		kfree(context);
		return ERR_PTR(-EFAULT);
	}

	return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
	kfree(context);

	return 0;
}
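
/*
 * mmap offsets understood by this driver: page offset 0 maps the
 * context's UAR (doorbell) page non-cached; offset 1 maps the matching
 * BlueFlame page write-combined, when the device has BlueFlame registers
 * (bf_reg_size != 0).
 */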
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context)
		if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
			mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}

	return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	kfree(pd);

	return 0;
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_ucontext *context,
					  struct ib_udata *udata)
{
	struct mlx4_ib_xrcd *xrcd;
	int err;

	if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
	if (err)
		goto err1;

	xrcd->pd = ib_alloc_pd(ibdev);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return &xrcd->ibxrcd;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
err1:
	kfree(xrcd);
	return ERR_PTR(err);
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	kfree(xrcd);

	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	u8 mac[6];
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		dev_hold(ndev);
	spin_unlock(&mdev->iboe.lock);

	if (ndev) {
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		rtnl_lock();
		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
		ret = 1;
		rtnl_unlock();
		dev_put(ndev);
	}

	return ret;
}

struct mlx4_ib_steering {
	struct list_head list;
	u64 reg_id;
	union ib_gid gid;
};
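
/*
 * attach_mcast verb: register the QP with the HW multicast table.  With
 * device-managed flow steering, the registration id returned by
 * mlx4_multicast_attach() is remembered on mqp->steering_rules so that
 * detach can locate and release the rule later.
 */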
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u64 reg_id;
	struct mlx4_ib_steering *ib_steering = NULL;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    MLX4_PROT_IB_IPV6, &reg_id);
	if (err)
		goto err_malloc;

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      MLX4_PROT_IB_IPV6, reg_id);
err_malloc:
	kfree(ib_steering);

	return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}

static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	u8 mac[6];
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	u64 reg_id = 0;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    MLX4_PROT_IB_IPV6, reg_id);
	if (err)
		return err;

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		if (ndev)
			dev_hold(ndev);
		spin_unlock(&mdev->iboe.lock);
		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
		if (ndev) {
			rtnl_lock();
			dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
			rtnl_unlock();
			dev_put(ndev);
		}
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}

static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
		       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
		       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
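
/*
 * Build the low 8 bytes (interface id) of a link-local GID from the
 * netdev's MAC address, modified-EUI-64 style: insert the VLAN id (or
 * 0xff/0xfe when there is no VLAN) in the middle bytes and flip the
 * universal/local bit.
 */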
static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
{
	memcpy(eui, dev->dev_addr, 3);
	memcpy(eui + 5, dev->dev_addr + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;
}
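
/*
 * Deferred work: push a port's shadow GID table to firmware via
 * SET_PORT(MLX4_SET_PORT_GID_TABLE) and, on success, commit it to
 * iboe.gid_table and raise IB_EVENT_GID_CHANGE so consumers re-read it.
 */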
static void update_gids_task(struct work_struct *work)
{
	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	union ib_gid *gids;
	int err;
	struct mlx4_dev *dev = gw->dev->dev;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
		return;
	}

	gids = mailbox->buf;
	memcpy(gids, gw->gids, sizeof gw->gids);

	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);
	if (err)
		pr_warn("set port command failed\n");
	else {
		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	kfree(gw);
}
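
/*
 * Recompute the link-local GID table of a port: walk every netdev (and
 * VLAN device) stacked on the port's netdev, derive its GID and place it
 * in a free slot; slots no longer backed by a netdev are cleared.  If
 * anything changed, hand the table to update_gids_task() on the driver
 * workqueue.
 */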
static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
{
	struct net_device *ndev = dev->iboe.netdevs[port - 1];
	struct update_gid_work *work;
	struct net_device *tmp;
	int i;
	u8 *hits;
	int ret;
	union ib_gid gid;
	int free;
	int found;
	int need_update = 0;
	u16 vid;

	work = kzalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	hits = kzalloc(128, GFP_ATOMIC);
	if (!hits) {
		ret = -ENOMEM;
		goto out;
	}

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, tmp) {
		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
			vid = rdma_vlan_dev_vlan_id(tmp);
			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
			found = 0;
			free = -1;
			for (i = 0; i < 128; ++i) {
				if (free < 0 &&
				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
					free = i;
				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
					hits[i] = 1;
					found = 1;
					break;
				}
			}

			if (!found) {
				if (tmp == ndev &&
				    (memcmp(&dev->iboe.gid_table[port - 1][0],
					    &gid, sizeof gid) ||
				     !memcmp(&dev->iboe.gid_table[port - 1][0],
					     &zgid, sizeof gid))) {
					dev->iboe.gid_table[port - 1][0] = gid;
					++need_update;
					hits[0] = 1;
				} else if (free >= 0) {
					dev->iboe.gid_table[port - 1][free] = gid;
					hits[free] = 1;
					++need_update;
				}
			}
		}
	}
	rcu_read_unlock();

	for (i = 0; i < 128; ++i)
		if (!hits[i]) {
			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
				++need_update;
			dev->iboe.gid_table[port - 1][i] = zgid;
		}

	if (need_update) {
		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
		INIT_WORK(&work->work, update_gids_task);
		work->port = port;
		work->dev = dev;
		queue_work(wq, &work->work);
	} else
		kfree(work);

	kfree(hits);
	return 0;

out:
	kfree(work);
	return ret;
}

static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGEADDR:
		update_ipv6_gids(dev, port, 0);
		break;

	case NETDEV_DOWN:
		update_ipv6_gids(dev, port, 1);
		dev->iboe.netdevs[port - 1] = NULL;
	}
}

static void netdev_added(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 0);
}

static void netdev_removed(struct mlx4_ib_dev *dev, int port)
{
	update_ipv6_gids(dev, port, 1);
}
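
/*
 * Netdevice notifier: refresh the cached per-port netdev pointers, then,
 * if the event concerns one of our ports (directly or through a VLAN
 * device on top of it), update that port's GID table.
 */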
static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	struct net_device *dev = ptr;
	struct mlx4_ib_dev *ibdev;
	struct net_device *oldnd;
	struct mlx4_ib_iboe *iboe;
	int port;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	iboe = &ibdev->iboe;

	spin_lock(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
		oldnd = iboe->netdevs[port - 1];
		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
		if (oldnd != iboe->netdevs[port - 1]) {
			if (iboe->netdevs[port - 1])
				netdev_added(ibdev, port);
			else
				netdev_removed(ibdev, port);
		}
	}

	if (dev == iboe->netdevs[0] ||
	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
		handle_en_event(ibdev, 1, event);
	else if (dev == iboe->netdevs[1]
		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
		handle_en_event(ibdev, 2, event);

	spin_unlock(&iboe->lock);

	return NOTIFY_DONE;
}
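
/*
 * Carve additional completion EQs out of the device's EQ pool, a
 * power-of-two number per IB port, so completion vectors can be spread
 * per port; fall back to the legacy shared vectors when the pool is too
 * small or an assignment fails.
 */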
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	char name[32];
	int eq_per_port = 0;
	int added_eqs = 0;
	int total_eqs = 0;
	int i, j, eq;

	/* Legacy mode or comp_pool is not large enough */
	if (dev->caps.comp_pool == 0 ||
	    dev->caps.num_ports > dev->caps.comp_pool)
		return;

	eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool /
					   dev->caps.num_ports);

	/* Init eq table */
	added_eqs = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		added_eqs += eq_per_port;

	total_eqs = dev->caps.num_comp_vectors + added_eqs;

	ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	ibdev->eq_added = added_eqs;

	eq = 0;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
		for (j = 0; j < eq_per_port; j++) {
			sprintf(name, "mlx4-ib-%d-%d@%s",
				i, j, dev->pdev->bus->name);
			/* Set IRQ for specific name (per ring) */
			if (mlx4_assign_eq(dev, name, NULL,
					   &ibdev->eq_table[eq])) {
				/* Use legacy (same as mlx4_en driver) */
				pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
				ibdev->eq_table[eq] =
					(eq % dev->caps.num_comp_vectors);
			}
			eq++;
		}
	}

	/* Fill the rest of the vector with legacy EQs */
	for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
		ibdev->eq_table[eq++] = i;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = total_eqs;
}

static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;

	/* no additional eqs were added */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;

	/* Free only the added eqs */
	for (i = 0; i < ibdev->eq_added; i++) {
		/* Don't free legacy eqs if used */
		if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
			continue;
		mlx4_release_eq(dev, ibdev->eq_table[i]);
	}

	kfree(ibdev->eq_table);
}
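
/*
 * mlx4_interface .add callback: allocate the ib_device, wire up all the
 * verbs entry points, set up EQs, flow counters, SR-IOV and the netdev
 * notifier, then register with the IB core.  Failures unwind in reverse
 * order through the err_* labels.
 */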
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;

	pr_info_once("%s", mlx4_ib_version);

	if (mlx4_is_mfunc(dev)) {
		pr_warn("IB not yet supported in SRIOV\n");
		return NULL;
	}

	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.owner = THIS_MODULE;
	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
	ibdev->num_ports = num_ports;
	ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
	ibdev->ib_dev.dma_device = &dev->pdev->dev;

	ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	ibdev->ib_dev.query_device = mlx4_ib_query_device;
	ibdev->ib_dev.query_port = mlx4_ib_query_port;
	ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
	ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
	ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
	ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap = mlx4_ib_mmap;
	ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
	ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send = mlx4_ib_post_send;
	ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
	ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
	ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
	ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
	ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
	ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
	ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
	ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
	ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
	ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
	ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
		ibdev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;

	for (i = 0; i < ibdev->num_ports; ++i) {
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
		    IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
			if (err)
				ibdev->counters[i] = -1;
		} else
			ibdev->counters[i] = -1;
	}

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev, NULL))
		goto err_counter;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err)
			goto err_sriov;
	}

	for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
		if (device_create_file(&ibdev->ib_dev.dev,
				       mlx4_class_attributes[j]))
			goto err_notif;
	}

	ibdev->ib_active = true;

	return ibdev;

err_notif:
	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
		pr_warn("failure unregistering notifier\n");
	flush_workqueue(wq);

err_sriov:
	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_counter:
	for (; i; --i)
		if (ibdev->counters[i - 1] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);

err_map:
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}

static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		if (ibdev->counters[p] != -1)
			mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
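
/*
 * On an SR-IOV master, queue one work item per port to create (do_init)
 * or tear down the tunnel QPs serving the given slave.
 */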
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;

	if (!mlx4_is_master(dev))
		return;

	dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
	if (!dm) {
		pr_err("failed to allocate memory for tunneling qp update\n");
		goto out;
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			pr_err("failed to allocate memory for tunneling qp update work struct\n");
			for (i = 0; i < dev->caps.num_ports; i++) {
				if (dm[i])
					kfree(dm[i]);
			}
			goto out;
		}
	}

	/* initialize or tear down tunnel QPs for the slave */
	for (i = 0; i < dev->caps.num_ports; i++) {
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		dm[i]->dev = ibdev;
		spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
		if (!ibdev->sriov.is_going_down)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	}
out:
	if (dm)
		kfree(dm);
	return;
}
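
/*
 * mlx4_interface .event callback: translate low-level device events into
 * IB events or SR-IOV bookkeeping.  For port up/down and slave events
 * 'param' carries a port or slave number; for port management change
 * events it carries a pointer to the EQE.
 */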
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew) {
			pr_err("failed to allocate memory for events work\n");
			break;
		}

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device = ibdev_ptr;
	ibev.element.port_num = (u8) p;

	ib_dispatch_event(&ibev);
}

static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6
};

static int __init mlx4_ib_init(void)
{
	int err;

	wq = create_singlethread_workqueue("mlx4_ib");
	if (!wq)
		return -ENOMEM;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err) {
		destroy_workqueue(wq);
		return err;
	}

	return 0;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);