main.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */
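
/*
 * Usage note: assuming the module is built as mlx4_core (DRV_NAME),
 * the parameters above and below can be set at load time, e.g.:
 *
 *	modprobe mlx4_core debug_level=1 msi_x=0
 */
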
static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
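
/*
 * Default resource profile: log2-scaled counts of the HCA resources
 * (QPs, SRQs, CQs, MCGs, MPTs, MTTs) that mlx4_make_profile() uses to
 * size the ICM layout when initializing the HCA.
 */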
static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 17,
	.num_mtt	= 1 << 20,
};

static int log_num_mac = 2;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");

static int use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		 "(0/1, default 0)");

static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
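
/*
 * Validate a requested per-port type configuration: mixed port types
 * require the DPDP capability (dual ports running different
 * protocols), an ETH port may not precede an IB port, and each
 * requested type must appear in that port's supported_type mask.
 */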
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
				return -EINVAL;
			}
			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
			    port_type[i + 1] == MLX4_PORT_TYPE_IB)
				return -EINVAL;
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}
	return 0;
}
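
/*
 * Rebuild dev->caps.port_mask: bit (port - 1) is set for each port
 * currently configured as IB.  Ports are numbered from 1.
 */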
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	dev->caps.port_mask = 0;
	for (i = 1; i <= dev->caps.num_ports; ++i)
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
			dev->caps.port_mask |= 1 << (i - 1);
}
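
/*
 * Query device capabilities from firmware, sanity-check them against
 * what the kernel and the PCI BARs can support, and fill in dev->caps,
 * clamping the log_num_mac/log_num_vlan module parameters to the
 * per-port firmware limits.
 */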
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i] = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
		dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i] = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
	}

	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
	dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
					       dev->caps.mtts_per_seg);
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;
	dev->caps.reserved_uars = dev_cap->reserved_uars;
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;

	dev->caps.log_num_macs = log_num_mac;
	dev->caps.log_num_vlans = log_num_vlan;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
			dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
		else
			dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
		dev->caps.possible_type[i] = dev->caps.port_type[i];
		mlx4_priv(dev)->sense.sense_allowed[i] =
			dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;

		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	mlx4_set_port_mask(dev);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	return 0;
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1]) {
			change = 1;
			dev->caps.port_type[port + 1] = port_types[port];
		}
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			err = mlx4_SET_PORT(dev, port);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, "
					 "aborting\n", port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
	}

out:
	return err;
}
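
/*
 * sysfs "mlx4_port<N>" attribute: reading reports the current port
 * type ("ib" or "eth", shown as "auto (...)" when port sensing is
 * enabled); writing "ib", "eth" or "auto" requests a new type, which
 * is validated and applied via mlx4_change_port_types().
 */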
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		return -EINVAL;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			 "Set only 'eth' or 'ib' for both ports "
			 "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}
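
/*
 * Allocate the firmware area in host memory, hand it to the device
 * with the MAP_FA command, and start the firmware with RUN_FW.
 */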
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
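
/*
 * Map the central MPT (cMPT) ICM tables.  The four cMPT regions (QP,
 * SRQ, CQ, EQ) share one base address; each region starts at
 * cmpt_base + ((type * cmpt_entry_sz) << MLX4_CMPT_SHIFT), as
 * computed below.
 */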
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz,
				  dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
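
/*
 * Size the ICM aux area with SET_ICM_SIZE, map it, then map each
 * context table (cMPT, EQ, MTT, dMPT, QP/AUXC/ALTC/RDMARC, CQ, SRQ,
 * MCG) at the addresses chosen by mlx4_make_profile().  On failure,
 * unwind in reverse order.
 */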
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  dev->caps.num_eqs, dev->caps.num_eqs,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtt_segs,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}
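
/* Tear down all ICM mappings in the reverse order of mlx4_init_icm(). */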
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	mlx4_CLOSE_HCA(dev, 0);
	mlx4_free_icms(dev);
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
}
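
/*
 * Bring the HCA up to the point where ordinary firmware commands can
 * be used: query and start the firmware, build the resource profile,
 * map ICM and issue INIT_HCA, then read the adapter identification
 * with QUERY_ADAPTER.
 */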
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_mod_stat_cfg mlx4_cfg;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	err = mlx4_QUERY_FW(dev);
	if (err) {
		if (err == -EACCES)
			mlx4_info(dev, "non-primary physical function, skipping.\n");
		else
			mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
		return err;
	}

	err = mlx4_load_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to start FW, aborting.\n");
		return err;
	}

	mlx4_cfg.log_pg_sz_m = 1;
	mlx4_cfg.log_pg_sz = 0;
	err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
	if (err)
		mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		goto err_stop_fw;
	}

	profile = default_profile;

	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
	if ((long long) icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);

	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mlx4_INIT_HCA(dev, &init_hca);
	if (err) {
		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

err_close:
	mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	mlx4_free_icms(dev);

err_stop_fw:
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);

	return err;
}
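
/*
 * Initialize the driver-side resource tables (UAR, PD, MR, EQ, CQ,
 * SRQ, QP, MCG), switch the command interface to event-driven mode,
 * and verify interrupt delivery with a NOP command before bringing up
 * the ports.
 */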
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt (IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_mcg_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "multicast group table, aborting.\n");
		goto err_qp_table_free;
	}

	for (port = 1; port <= dev->caps.num_ports; port++) {
		ib_port_default_caps = 0;
		err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
		if (err)
			mlx4_warn(dev, "failed to get port %d default "
				  "ib capabilities (%d). Continuing with "
				  "caps = 0\n", port, err);
		dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
		err = mlx4_SET_PORT(dev, port);
		if (err) {
			mlx4_err(dev, "Failed to set port %d, aborting\n",
				 port);
			goto err_mcg_table_free;
		}
	}

	return 0;

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
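
/*
 * Try to enable MSI-X with up to one completion vector per possible
 * CPU plus one vector for the async event queue (bounded by the EQs
 * the HCA provides).  If MSI-X cannot be enabled, fall back to a
 * single shared legacy interrupt for both EQs.
 */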
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq;
	int err;
	int i;

	if (msi_x) {
		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     num_possible_cpus() + 1);
		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* Try again if at least 2 vectors are available */
			if (err > 1) {
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		dev->caps.num_comp_vectors = nreq - 1;
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}

static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	mlx4_init_mac_table(dev, &info->mac_table);
	mlx4_init_vlan_table(dev, &info->vlan_table);

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
	info->port_attr.show = show_port_type;
	info->port_attr.store = set_port_type;

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
}
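
/*
 * Core PCI probe path: enable the device, validate the BARs (BAR 0
 * must be the 1 MB device control space, BAR 2 holds the UARs), set
 * the DMA masks, reset the HCA, then initialize firmware, interrupts,
 * resource tables and ports before registering the device with the
 * mlx4 interfaces.
 */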
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	printk(KERN_INFO PFX "Initializing %s\n",
	       pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_regions;
		}
	}

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mlx4_reset(dev);
	if (err) {
		mlx4_err(dev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	if (mlx4_cmd_init(dev)) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mlx4_init_hca(dev);
	if (err)
		goto err_cmd;

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_close;

	mlx4_enable_msi_x(dev);

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_free_eq;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	pci_set_drvdata(pdev, dev);

	return 0;

err_port:
	for (port = 1; port <= dev->caps.num_ports; port++)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	static int mlx4_version_printed;

	if (!mlx4_version_printed) {
		printk(KERN_INFO "%s", mlx4_version);
		++mlx4_version_printed;
	}

	return __mlx4_init_one(pdev, id);
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_pd_table(dev);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		mlx4_free_eq_table(dev);
		mlx4_close_hca(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);

		kfree(priv);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
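
/*
 * Full remove/probe cycle on one device.  This appears to exist for
 * the catastrophic-error recovery path (see mlx4_catas_init() below),
 * which restarts a wedged HCA by tearing it down and reprobing it.
 */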
int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}

static struct pci_device_id mlx4_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};

static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		printk(KERN_WARNING "mlx4_core: bad log_num_mac: %d\n", log_num_mac);
		return -1;
	}

	if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
		printk(KERN_WARNING "mlx4_core: bad log_num_vlan: %d\n", log_num_vlan);
		return -1;
	}

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
		printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
		return -1;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);