/* drivers/infiniband/hw/mthca/mthca_main.c */
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_main.c 1396 2004-12-28 04:10:27Z roland $
 */
  34. #include <linux/config.h>
  35. #include <linux/version.h>
  36. #include <linux/module.h>
  37. #include <linux/init.h>
  38. #include <linux/errno.h>
  39. #include <linux/pci.h>
  40. #include <linux/interrupt.h>
  41. #include "mthca_dev.h"
  42. #include "mthca_config_reg.h"
  43. #include "mthca_cmd.h"
  44. #include "mthca_profile.h"
  45. #include "mthca_memfree.h"
  46. MODULE_AUTHOR("Roland Dreier");
  47. MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
  48. MODULE_LICENSE("Dual BSD/GPL");
  49. MODULE_VERSION(DRV_VERSION);
  50. #ifdef CONFIG_PCI_MSI
  51. static int msi_x = 0;
  52. module_param(msi_x, int, 0444);
  53. MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
  54. static int msi = 0;
  55. module_param(msi, int, 0444);
  56. MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero");
  57. #else /* CONFIG_PCI_MSI */
  58. #define msi_x (0)
  59. #define msi (0)
  60. #endif /* CONFIG_PCI_MSI */
  61. static const char mthca_version[] __devinitdata =
  62. "ib_mthca: Mellanox InfiniBand HCA driver v"
  63. DRV_VERSION " (" DRV_RELDATE ")\n";
  64. static struct mthca_profile default_profile = {
  65. .num_qp = 1 << 16,
  66. .rdb_per_qp = 4,
  67. .num_cq = 1 << 16,
  68. .num_mcg = 1 << 13,
  69. .num_mpt = 1 << 17,
  70. .num_mtt = 1 << 20,
  71. .num_udav = 1 << 15, /* Tavor only */
  72. .uarc_size = 1 << 18, /* Arbel only */
  73. };
  74. static int __devinit mthca_tune_pci(struct mthca_dev *mdev)
  75. {
  76. int cap;
  77. u16 val;
  78. /* First try to max out Read Byte Count */
  79. cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
  80. if (cap) {
  81. if (pci_read_config_word(mdev->pdev, cap + PCI_X_CMD, &val)) {
  82. mthca_err(mdev, "Couldn't read PCI-X command register, "
  83. "aborting.\n");
  84. return -ENODEV;
  85. }
  86. val = (val & ~PCI_X_CMD_MAX_READ) | (3 << 2);
  87. if (pci_write_config_word(mdev->pdev, cap + PCI_X_CMD, val)) {
  88. mthca_err(mdev, "Couldn't write PCI-X command register, "
  89. "aborting.\n");
  90. return -ENODEV;
  91. }
  92. } else if (mdev->hca_type == TAVOR)
  93. mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");
  94. cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);
  95. if (cap) {
  96. if (pci_read_config_word(mdev->pdev, cap + PCI_EXP_DEVCTL, &val)) {
  97. mthca_err(mdev, "Couldn't read PCI Express device control "
  98. "register, aborting.\n");
  99. return -ENODEV;
  100. }
  101. val = (val & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12);
  102. if (pci_write_config_word(mdev->pdev, cap + PCI_EXP_DEVCTL, val)) {
  103. mthca_err(mdev, "Couldn't write PCI Express device control "
  104. "register, aborting.\n");
  105. return -ENODEV;
  106. }
  107. } else if (mdev->hca_type == ARBEL_NATIVE ||
  108. mdev->hca_type == ARBEL_COMPAT)
  109. mthca_info(mdev, "No PCI Express capability, "
  110. "not setting Max Read Request Size.\n");
  111. return 0;
  112. }
  113. static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
  114. {
  115. int err;
  116. u8 status;
  117. err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
  118. if (err) {
  119. mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
  120. return err;
  121. }
  122. if (status) {
  123. mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
  124. "aborting.\n", status);
  125. return -EINVAL;
  126. }
  127. if (dev_lim->min_page_sz > PAGE_SIZE) {
  128. mthca_err(mdev, "HCA minimum page size of %d bigger than "
  129. "kernel PAGE_SIZE of %ld, aborting.\n",
  130. dev_lim->min_page_sz, PAGE_SIZE);
  131. return -ENODEV;
  132. }
  133. if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
  134. mthca_err(mdev, "HCA has %d ports, but we only support %d, "
  135. "aborting.\n",
  136. dev_lim->num_ports, MTHCA_MAX_PORTS);
  137. return -ENODEV;
  138. }
  139. mdev->limits.num_ports = dev_lim->num_ports;
  140. mdev->limits.vl_cap = dev_lim->max_vl;
  141. mdev->limits.mtu_cap = dev_lim->max_mtu;
  142. mdev->limits.gid_table_len = dev_lim->max_gids;
  143. mdev->limits.pkey_table_len = dev_lim->max_pkeys;
  144. mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
  145. mdev->limits.max_sg = dev_lim->max_sg;
  146. mdev->limits.reserved_qps = dev_lim->reserved_qps;
  147. mdev->limits.reserved_srqs = dev_lim->reserved_srqs;
  148. mdev->limits.reserved_eecs = dev_lim->reserved_eecs;
  149. mdev->limits.reserved_cqs = dev_lim->reserved_cqs;
  150. mdev->limits.reserved_eqs = dev_lim->reserved_eqs;
  151. mdev->limits.reserved_mtts = dev_lim->reserved_mtts;
  152. mdev->limits.reserved_mrws = dev_lim->reserved_mrws;
  153. mdev->limits.reserved_uars = dev_lim->reserved_uars;
  154. mdev->limits.reserved_pds = dev_lim->reserved_pds;
  155. /* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
  156. May be doable since hardware supports it for SRQ.
  157. IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.
  158. IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
  159. supported by driver. */
  160. mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
  161. IB_DEVICE_PORT_ACTIVE_EVENT |
  162. IB_DEVICE_SYS_IMAGE_GUID |
  163. IB_DEVICE_RC_RNR_NAK_GEN;
  164. if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
  165. mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
  166. if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
  167. mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
  168. if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
  169. mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;
  170. if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
  171. mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
  172. if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
  173. mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
  174. if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
  175. mdev->mthca_flags |= MTHCA_FLAG_SRQ;
  176. return 0;
  177. }
  178. static int __devinit mthca_init_tavor(struct mthca_dev *mdev)
  179. {
  180. u8 status;
  181. int err;
  182. struct mthca_dev_lim dev_lim;
  183. struct mthca_profile profile;
  184. struct mthca_init_hca_param init_hca;
  185. struct mthca_adapter adapter;
  186. err = mthca_SYS_EN(mdev, &status);
  187. if (err) {
  188. mthca_err(mdev, "SYS_EN command failed, aborting.\n");
  189. return err;
  190. }
  191. if (status) {
  192. mthca_err(mdev, "SYS_EN returned status 0x%02x, "
  193. "aborting.\n", status);
  194. return -EINVAL;
  195. }
  196. err = mthca_QUERY_FW(mdev, &status);
  197. if (err) {
  198. mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
  199. goto err_disable;
  200. }
  201. if (status) {
  202. mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
  203. "aborting.\n", status);
  204. err = -EINVAL;
  205. goto err_disable;
  206. }
  207. err = mthca_QUERY_DDR(mdev, &status);
  208. if (err) {
  209. mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
  210. goto err_disable;
  211. }
  212. if (status) {
  213. mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
  214. "aborting.\n", status);
  215. err = -EINVAL;
  216. goto err_disable;
  217. }
  218. err = mthca_dev_lim(mdev, &dev_lim);
  219. profile = default_profile;
  220. profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
  221. profile.uarc_size = 0;
  222. err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
  223. if (err < 0)
  224. goto err_disable;
  225. err = mthca_INIT_HCA(mdev, &init_hca, &status);
  226. if (err) {
  227. mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
  228. goto err_disable;
  229. }
  230. if (status) {
  231. mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
  232. "aborting.\n", status);
  233. err = -EINVAL;
  234. goto err_disable;
  235. }
  236. err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
  237. if (err) {
  238. mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
  239. goto err_close;
  240. }
  241. if (status) {
  242. mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
  243. "aborting.\n", status);
  244. err = -EINVAL;
  245. goto err_close;
  246. }
  247. mdev->eq_table.inta_pin = adapter.inta_pin;
  248. mdev->rev_id = adapter.revision_id;
  249. return 0;
  250. err_close:
  251. mthca_CLOSE_HCA(mdev, 0, &status);
  252. err_disable:
  253. mthca_SYS_DIS(mdev, &status);
  254. return err;
  255. }
  256. static int __devinit mthca_load_fw(struct mthca_dev *mdev)
  257. {
  258. u8 status;
  259. int err;
  260. /* FIXME: use HCA-attached memory for FW if present */
  261. mdev->fw.arbel.fw_icm =
  262. mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
  263. GFP_HIGHUSER | __GFP_NOWARN);
  264. if (!mdev->fw.arbel.fw_icm) {
  265. mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
  266. return -ENOMEM;
  267. }
  268. err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
  269. if (err) {
  270. mthca_err(mdev, "MAP_FA command failed, aborting.\n");
  271. goto err_free;
  272. }
  273. if (status) {
  274. mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
  275. err = -EINVAL;
  276. goto err_free;
  277. }
  278. err = mthca_RUN_FW(mdev, &status);
  279. if (err) {
  280. mthca_err(mdev, "RUN_FW command failed, aborting.\n");
  281. goto err_unmap_fa;
  282. }
  283. if (status) {
  284. mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
  285. err = -EINVAL;
  286. goto err_unmap_fa;
  287. }
  288. return 0;
  289. err_unmap_fa:
  290. mthca_UNMAP_FA(mdev, &status);
  291. err_free:
  292. mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
  293. return err;
  294. }
  295. static int __devinit mthca_init_icm(struct mthca_dev *mdev,
  296. struct mthca_dev_lim *dev_lim,
  297. struct mthca_init_hca_param *init_hca,
  298. u64 icm_size)
  299. {
  300. u64 aux_pages;
  301. u8 status;
  302. int err;
  303. err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
  304. if (err) {
  305. mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
  306. return err;
  307. }
  308. if (status) {
  309. mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
  310. "aborting.\n", status);
  311. return -EINVAL;
  312. }
  313. mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
  314. (unsigned long long) icm_size >> 10,
  315. (unsigned long long) aux_pages << 2);
  316. mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
  317. GFP_HIGHUSER | __GFP_NOWARN);
  318. if (!mdev->fw.arbel.aux_icm) {
  319. mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
  320. return -ENOMEM;
  321. }
  322. err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
  323. if (err) {
  324. mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
  325. goto err_free_aux;
  326. }
  327. if (status) {
  328. mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
  329. err = -EINVAL;
  330. goto err_free_aux;
  331. }
  332. err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
  333. if (err) {
  334. mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
  335. goto err_unmap_aux;
  336. }
  337. mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
  338. dev_lim->mtt_seg_sz,
  339. mdev->limits.num_mtt_segs,
  340. mdev->limits.reserved_mtts, 1);
  341. if (!mdev->mr_table.mtt_table) {
  342. mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
  343. err = -ENOMEM;
  344. goto err_unmap_eq;
  345. }
  346. mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
  347. dev_lim->mpt_entry_sz,
  348. mdev->limits.num_mpts,
  349. mdev->limits.reserved_mrws, 1);
  350. if (!mdev->mr_table.mpt_table) {
  351. mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
  352. err = -ENOMEM;
  353. goto err_unmap_mtt;
  354. }
  355. mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
  356. dev_lim->qpc_entry_sz,
  357. mdev->limits.num_qps,
  358. mdev->limits.reserved_qps, 0);
  359. if (!mdev->qp_table.qp_table) {
  360. mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
  361. err = -ENOMEM;
  362. goto err_unmap_mpt;
  363. }
  364. mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
  365. dev_lim->eqpc_entry_sz,
  366. mdev->limits.num_qps,
  367. mdev->limits.reserved_qps, 0);
  368. if (!mdev->qp_table.eqp_table) {
  369. mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
  370. err = -ENOMEM;
  371. goto err_unmap_qp;
  372. }
  373. mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
  374. dev_lim->cqc_entry_sz,
  375. mdev->limits.num_cqs,
  376. mdev->limits.reserved_cqs, 0);
  377. if (!mdev->cq_table.table) {
  378. mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
  379. err = -ENOMEM;
  380. goto err_unmap_eqp;
  381. }
  382. /*
  383. * It's not strictly required, but for simplicity just map the
  384. * whole multicast group table now. The table isn't very big
  385. * and it's a lot easier than trying to track ref counts.
  386. */
  387. mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
  388. MTHCA_MGM_ENTRY_SIZE,
  389. mdev->limits.num_mgms +
  390. mdev->limits.num_amgms,
  391. mdev->limits.num_mgms +
  392. mdev->limits.num_amgms,
  393. 0);
  394. if (!mdev->mcg_table.table) {
  395. mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
  396. err = -ENOMEM;
  397. goto err_unmap_cq;
  398. }
  399. return 0;
  400. err_unmap_cq:
  401. mthca_free_icm_table(mdev, mdev->cq_table.table);
  402. err_unmap_eqp:
  403. mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
  404. err_unmap_qp:
  405. mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
  406. err_unmap_mpt:
  407. mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
  408. err_unmap_mtt:
  409. mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
  410. err_unmap_eq:
  411. mthca_unmap_eq_icm(mdev);
  412. err_unmap_aux:
  413. mthca_UNMAP_ICM_AUX(mdev, &status);
  414. err_free_aux:
  415. mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
  416. return err;
  417. }
  418. static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
  419. {
  420. struct mthca_dev_lim dev_lim;
  421. struct mthca_profile profile;
  422. struct mthca_init_hca_param init_hca;
  423. struct mthca_adapter adapter;
  424. u64 icm_size;
  425. u8 status;
  426. int err;
  427. err = mthca_QUERY_FW(mdev, &status);
  428. if (err) {
  429. mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
  430. return err;
  431. }
  432. if (status) {
  433. mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
  434. "aborting.\n", status);
  435. return -EINVAL;
  436. }
  437. err = mthca_ENABLE_LAM(mdev, &status);
  438. if (err) {
  439. mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
  440. return err;
  441. }
  442. if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
  443. mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
  444. mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
  445. } else if (status) {
  446. mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
  447. "aborting.\n", status);
  448. return -EINVAL;
  449. }
  450. err = mthca_load_fw(mdev);
  451. if (err) {
  452. mthca_err(mdev, "Failed to start FW, aborting.\n");
  453. goto err_disable;
  454. }
  455. err = mthca_dev_lim(mdev, &dev_lim);
  456. if (err) {
  457. mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
  458. goto err_stop_fw;
  459. }
  460. profile = default_profile;
  461. profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
  462. profile.num_udav = 0;
  463. icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
  464. if ((int) icm_size < 0) {
  465. err = icm_size;
  466. goto err_stop_fw;
  467. }
  468. err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
  469. if (err)
  470. goto err_stop_fw;
  471. err = mthca_INIT_HCA(mdev, &init_hca, &status);
  472. if (err) {
  473. mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
  474. goto err_free_icm;
  475. }
  476. if (status) {
  477. mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
  478. "aborting.\n", status);
  479. err = -EINVAL;
  480. goto err_free_icm;
  481. }
  482. err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
  483. if (err) {
  484. mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
  485. goto err_free_icm;
  486. }
  487. if (status) {
  488. mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
  489. "aborting.\n", status);
  490. err = -EINVAL;
  491. goto err_free_icm;
  492. }
  493. mdev->eq_table.inta_pin = adapter.inta_pin;
  494. mdev->rev_id = adapter.revision_id;
  495. return 0;
  496. err_free_icm:
  497. mthca_free_icm_table(mdev, mdev->cq_table.table);
  498. mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
  499. mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
  500. mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
  501. mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
  502. mthca_unmap_eq_icm(mdev);
  503. mthca_UNMAP_ICM_AUX(mdev, &status);
  504. mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
  505. err_stop_fw:
  506. mthca_UNMAP_FA(mdev, &status);
  507. mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
  508. err_disable:
  509. if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
  510. mthca_DISABLE_LAM(mdev, &status);
  511. return err;
  512. }
  513. static int __devinit mthca_init_hca(struct mthca_dev *mdev)
  514. {
  515. if (mdev->hca_type == ARBEL_NATIVE)
  516. return mthca_init_arbel(mdev);
  517. else
  518. return mthca_init_tavor(mdev);
  519. }
  520. static int __devinit mthca_setup_hca(struct mthca_dev *dev)
  521. {
  522. int err;
  523. u8 status;
  524. MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);
  525. err = mthca_init_uar_table(dev);
  526. if (err) {
  527. mthca_err(dev, "Failed to initialize "
  528. "user access region table, aborting.\n");
  529. return err;
  530. }
  531. err = mthca_uar_alloc(dev, &dev->driver_uar);
  532. if (err) {
  533. mthca_err(dev, "Failed to allocate driver access region, "
  534. "aborting.\n");
  535. goto err_uar_table_free;
  536. }
  537. dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
  538. if (!dev->kar) {
  539. mthca_err(dev, "Couldn't map kernel access region, "
  540. "aborting.\n");
  541. err = -ENOMEM;
  542. goto err_uar_free;
  543. }
  544. err = mthca_init_pd_table(dev);
  545. if (err) {
  546. mthca_err(dev, "Failed to initialize "
  547. "protection domain table, aborting.\n");
  548. goto err_kar_unmap;
  549. }
  550. err = mthca_init_mr_table(dev);
  551. if (err) {
  552. mthca_err(dev, "Failed to initialize "
  553. "memory region table, aborting.\n");
  554. goto err_pd_table_free;
  555. }
  556. err = mthca_pd_alloc(dev, &dev->driver_pd);
  557. if (err) {
  558. mthca_err(dev, "Failed to create driver PD, "
  559. "aborting.\n");
  560. goto err_mr_table_free;
  561. }
  562. err = mthca_init_eq_table(dev);
  563. if (err) {
  564. mthca_err(dev, "Failed to initialize "
  565. "event queue table, aborting.\n");
  566. goto err_pd_free;
  567. }
  568. err = mthca_cmd_use_events(dev);
  569. if (err) {
  570. mthca_err(dev, "Failed to switch to event-driven "
  571. "firmware commands, aborting.\n");
  572. goto err_eq_table_free;
  573. }
  574. err = mthca_NOP(dev, &status);
  575. if (err || status) {
  576. mthca_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting.\n",
  577. dev->mthca_flags & MTHCA_FLAG_MSI_X ?
  578. dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector :
  579. dev->pdev->irq);
  580. if (dev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X))
  581. mthca_err(dev, "Try again with MSI/MSI-X disabled.\n");
  582. else
  583. mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n");
  584. goto err_cmd_poll;
  585. }
  586. mthca_dbg(dev, "NOP command IRQ test passed\n");
  587. err = mthca_init_cq_table(dev);
  588. if (err) {
  589. mthca_err(dev, "Failed to initialize "
  590. "completion queue table, aborting.\n");
  591. goto err_cmd_poll;
  592. }
  593. err = mthca_init_qp_table(dev);
  594. if (err) {
  595. mthca_err(dev, "Failed to initialize "
  596. "queue pair table, aborting.\n");
  597. goto err_cq_table_free;
  598. }
  599. err = mthca_init_av_table(dev);
  600. if (err) {
  601. mthca_err(dev, "Failed to initialize "
  602. "address vector table, aborting.\n");
  603. goto err_qp_table_free;
  604. }
  605. err = mthca_init_mcg_table(dev);
  606. if (err) {
  607. mthca_err(dev, "Failed to initialize "
  608. "multicast group table, aborting.\n");
  609. goto err_av_table_free;
  610. }
  611. return 0;
  612. err_av_table_free:
  613. mthca_cleanup_av_table(dev);
  614. err_qp_table_free:
  615. mthca_cleanup_qp_table(dev);
  616. err_cq_table_free:
  617. mthca_cleanup_cq_table(dev);
  618. err_cmd_poll:
  619. mthca_cmd_use_polling(dev);
  620. err_eq_table_free:
  621. mthca_cleanup_eq_table(dev);
  622. err_pd_free:
  623. mthca_pd_free(dev, &dev->driver_pd);
  624. err_mr_table_free:
  625. mthca_cleanup_mr_table(dev);
  626. err_pd_table_free:
  627. mthca_cleanup_pd_table(dev);
  628. err_kar_unmap:
  629. iounmap(dev->kar);
  630. err_uar_free:
  631. mthca_uar_free(dev, &dev->driver_uar);
  632. err_uar_table_free:
  633. mthca_cleanup_uar_table(dev);
  634. return err;
  635. }
  636. static int __devinit mthca_request_regions(struct pci_dev *pdev,
  637. int ddr_hidden)
  638. {
  639. int err;
  640. /*
  641. * We can't just use pci_request_regions() because the MSI-X
  642. * table is right in the middle of the first BAR. If we did
  643. * pci_request_region and grab all of the first BAR, then
  644. * setting up MSI-X would fail, since the PCI core wants to do
  645. * request_mem_region on the MSI-X vector table.
  646. *
  647. * So just request what we need right now, and request any
  648. * other regions we need when setting up EQs.
  649. */
  650. if (!request_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
  651. MTHCA_HCR_SIZE, DRV_NAME))
  652. return -EBUSY;
  653. err = pci_request_region(pdev, 2, DRV_NAME);
  654. if (err)
  655. goto err_bar2_failed;
  656. if (!ddr_hidden) {
  657. err = pci_request_region(pdev, 4, DRV_NAME);
  658. if (err)
  659. goto err_bar4_failed;
  660. }
  661. return 0;
  662. err_bar4_failed:
  663. pci_release_region(pdev, 2);
  664. err_bar2_failed:
  665. release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
  666. MTHCA_HCR_SIZE);
  667. return err;
  668. }
  669. static void mthca_release_regions(struct pci_dev *pdev,
  670. int ddr_hidden)
  671. {
  672. if (!ddr_hidden)
  673. pci_release_region(pdev, 4);
  674. pci_release_region(pdev, 2);
  675. release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
  676. MTHCA_HCR_SIZE);
  677. }
  678. static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev)
  679. {
  680. struct msix_entry entries[3];
  681. int err;
  682. entries[0].entry = 0;
  683. entries[1].entry = 1;
  684. entries[2].entry = 2;
  685. err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries));
  686. if (err) {
  687. if (err > 0)
  688. mthca_info(mdev, "Only %d MSI-X vectors available, "
  689. "not using MSI-X\n", err);
  690. return err;
  691. }
  692. mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
  693. mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
  694. mdev->eq_table.eq[MTHCA_EQ_CMD ].msi_x_vector = entries[2].vector;
  695. return 0;
  696. }
  697. static void mthca_close_hca(struct mthca_dev *mdev)
  698. {
  699. u8 status;
  700. mthca_CLOSE_HCA(mdev, 0, &status);
  701. if (mdev->hca_type == ARBEL_NATIVE) {
  702. mthca_free_icm_table(mdev, mdev->cq_table.table);
  703. mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
  704. mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
  705. mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
  706. mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
  707. mthca_unmap_eq_icm(mdev);
  708. mthca_UNMAP_ICM_AUX(mdev, &status);
  709. mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
  710. mthca_UNMAP_FA(mdev, &status);
  711. mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
  712. if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
  713. mthca_DISABLE_LAM(mdev, &status);
  714. } else
  715. mthca_SYS_DIS(mdev, &status);
  716. }
  717. static int __devinit mthca_init_one(struct pci_dev *pdev,
  718. const struct pci_device_id *id)
  719. {
  720. static int mthca_version_printed = 0;
  721. static int mthca_memfree_warned = 0;
  722. int ddr_hidden = 0;
  723. int err;
  724. struct mthca_dev *mdev;
  725. if (!mthca_version_printed) {
  726. printk(KERN_INFO "%s", mthca_version);
  727. ++mthca_version_printed;
  728. }
  729. printk(KERN_INFO PFX "Initializing %s (%s)\n",
  730. pci_pretty_name(pdev), pci_name(pdev));
  731. err = pci_enable_device(pdev);
  732. if (err) {
  733. dev_err(&pdev->dev, "Cannot enable PCI device, "
  734. "aborting.\n");
  735. return err;
  736. }
  737. /*
  738. * Check for BARs. We expect 0: 1MB, 2: 8MB, 4: DDR (may not
  739. * be present)
  740. */
  741. if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
  742. pci_resource_len(pdev, 0) != 1 << 20) {
  743. dev_err(&pdev->dev, "Missing DCS, aborting.");
  744. err = -ENODEV;
  745. goto err_disable_pdev;
  746. }
  747. if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM) ||
  748. pci_resource_len(pdev, 2) != 1 << 23) {
  749. dev_err(&pdev->dev, "Missing UAR, aborting.");
  750. err = -ENODEV;
  751. goto err_disable_pdev;
  752. }
  753. if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
  754. ddr_hidden = 1;
  755. err = mthca_request_regions(pdev, ddr_hidden);
  756. if (err) {
  757. dev_err(&pdev->dev, "Cannot obtain PCI resources, "
  758. "aborting.\n");
  759. goto err_disable_pdev;
  760. }
  761. pci_set_master(pdev);
  762. err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
  763. if (err) {
  764. dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
  765. err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  766. if (err) {
  767. dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
  768. goto err_free_res;
  769. }
  770. }
  771. err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
  772. if (err) {
  773. dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
  774. "consistent PCI DMA mask.\n");
  775. err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  776. if (err) {
  777. dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
  778. "aborting.\n");
  779. goto err_free_res;
  780. }
  781. }
  782. mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
  783. if (!mdev) {
  784. dev_err(&pdev->dev, "Device struct alloc failed, "
  785. "aborting.\n");
  786. err = -ENOMEM;
  787. goto err_free_res;
  788. }
  789. mdev->pdev = pdev;
  790. mdev->hca_type = id->driver_data;
  791. if (mdev->hca_type == ARBEL_NATIVE && !mthca_memfree_warned++)
  792. mthca_warn(mdev, "Warning: native MT25208 mode support is incomplete. "
  793. "Your HCA may not work properly.\n");
  794. if (ddr_hidden)
  795. mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;
  796. /*
  797. * Now reset the HCA before we touch the PCI capabilities or
  798. * attempt a firmware command, since a boot ROM may have left
  799. * the HCA in an undefined state.
  800. */
  801. err = mthca_reset(mdev);
  802. if (err) {
  803. mthca_err(mdev, "Failed to reset HCA, aborting.\n");
  804. goto err_free_dev;
  805. }
  806. if (msi_x && !mthca_enable_msi_x(mdev))
  807. mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
  808. if (msi && !(mdev->mthca_flags & MTHCA_FLAG_MSI_X) &&
  809. !pci_enable_msi(pdev))
  810. mdev->mthca_flags |= MTHCA_FLAG_MSI;
  811. sema_init(&mdev->cmd.hcr_sem, 1);
  812. sema_init(&mdev->cmd.poll_sem, 1);
  813. mdev->cmd.use_events = 0;
  814. mdev->hcr = ioremap(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE, MTHCA_HCR_SIZE);
  815. if (!mdev->hcr) {
  816. mthca_err(mdev, "Couldn't map command register, "
  817. "aborting.\n");
  818. err = -ENOMEM;
  819. goto err_free_dev;
  820. }
  821. err = mthca_tune_pci(mdev);
  822. if (err)
  823. goto err_iounmap;
  824. err = mthca_init_hca(mdev);
  825. if (err)
  826. goto err_iounmap;
  827. err = mthca_setup_hca(mdev);
  828. if (err)
  829. goto err_close;
  830. err = mthca_register_device(mdev);
  831. if (err)
  832. goto err_cleanup;
  833. err = mthca_create_agents(mdev);
  834. if (err)
  835. goto err_unregister;
  836. pci_set_drvdata(pdev, mdev);
  837. return 0;
  838. err_unregister:
  839. mthca_unregister_device(mdev);
  840. err_cleanup:
  841. mthca_cleanup_mcg_table(mdev);
  842. mthca_cleanup_av_table(mdev);
  843. mthca_cleanup_qp_table(mdev);
  844. mthca_cleanup_cq_table(mdev);
  845. mthca_cmd_use_polling(mdev);
  846. mthca_cleanup_eq_table(mdev);
  847. mthca_pd_free(mdev, &mdev->driver_pd);
  848. mthca_cleanup_mr_table(mdev);
  849. mthca_cleanup_pd_table(mdev);
  850. mthca_cleanup_uar_table(mdev);
  851. err_close:
  852. mthca_close_hca(mdev);
  853. err_iounmap:
  854. iounmap(mdev->hcr);
  855. err_free_dev:
  856. if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
  857. pci_disable_msix(pdev);
  858. if (mdev->mthca_flags & MTHCA_FLAG_MSI)
  859. pci_disable_msi(pdev);
  860. ib_dealloc_device(&mdev->ib_dev);
  861. err_free_res:
  862. mthca_release_regions(pdev, ddr_hidden);
  863. err_disable_pdev:
  864. pci_disable_device(pdev);
  865. pci_set_drvdata(pdev, NULL);
  866. return err;
  867. }
/*
 * Tear down one HCA instance; reverse of the probe path.  The cleanup
 * sequence matches mthca_init_one()'s error-unwind order, so the
 * relative ordering of these calls must not change.
 */
static void __devexit mthca_remove_one(struct pci_dev *pdev)
{
	struct mthca_dev *mdev = pci_get_drvdata(pdev);
	u8 status;
	int p;

	/* drvdata is NULL if probe never completed for this device. */
	if (mdev) {
		mthca_free_agents(mdev);
		mthca_unregister_device(mdev);

		/* IB ports are numbered starting from 1. */
		for (p = 1; p <= mdev->limits.num_ports; ++p)
			mthca_CLOSE_IB(mdev, p, &status);

		mthca_cleanup_mcg_table(mdev);
		mthca_cleanup_av_table(mdev);
		mthca_cleanup_qp_table(mdev);
		mthca_cleanup_cq_table(mdev);
		/* Switch commands back to polling before tearing down EQs,
		 * since event-driven command completion needs the EQs. */
		mthca_cmd_use_polling(mdev);
		mthca_cleanup_eq_table(mdev);

		mthca_pd_free(mdev, &mdev->driver_pd);
		mthca_cleanup_mr_table(mdev);
		mthca_cleanup_pd_table(mdev);

		iounmap(mdev->kar);
		mthca_uar_free(mdev, &mdev->driver_uar);
		mthca_cleanup_uar_table(mdev);
		mthca_close_hca(mdev);
		iounmap(mdev->hcr);

		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
			pci_disable_msix(pdev);
		if (mdev->mthca_flags & MTHCA_FLAG_MSI)
			pci_disable_msi(pdev);

		ib_dealloc_device(&mdev->ib_dev);
		/* Release BAR 4 only if it was claimed (DDR not hidden). */
		mthca_release_regions(pdev, mdev->mthca_flags &
				      MTHCA_FLAG_DDR_HIDDEN);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/*
 * PCI IDs this driver binds to.  Each HCA model is listed under both
 * the Mellanox and Topspin vendor IDs; .driver_data carries the HCA
 * type, which the probe routine stores in mdev->hca_type.
 */
static struct pci_device_id mthca_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ 0, }			/* terminating entry */
};

/* Export the ID table so module tools can autoload the driver. */
MODULE_DEVICE_TABLE(pci, mthca_pci_table);
/* PCI driver glue: binds mthca_pci_table IDs to the probe/remove hooks. */
static struct pci_driver mthca_driver = {
	.name		= "ib_mthca",
	.id_table	= mthca_pci_table,
	.probe		= mthca_init_one,
	.remove		= __devexit_p(mthca_remove_one)
};
  925. static int __init mthca_init(void)
  926. {
  927. int ret;
  928. ret = pci_register_driver(&mthca_driver);
  929. return ret < 0 ? ret : 0;
  930. }
/* Module exit point: unregister the driver, detaching all bound devices. */
static void __exit mthca_cleanup(void)
{
	pci_unregister_driver(&mthca_driver);
}

module_init(mthca_init);
module_exit(mthca_cleanup);