bfa_ioc_ct.c

/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <bfa.h>
#include <bfa_ioc.h>
#include <bfa_fwimg_priv.h>
#include <cna/bfa_cna_trcmod.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>

BFA_TRC_FILE(CNA, IOC_CT);

/*
 * forward declarations
 */
static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
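
/*
 * Catapult (CT) ASIC hardware interface, populated by bfa_ioc_set_ct_hwif()
 * below.
 */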
struct bfa_ioc_hwif_s hwif_ct;

/**
 * Called from bfa_ioc_attach() to map ASIC-specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
	hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;

	ioc->ioc_hwif = &hwif_ct;
}

/**
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	/**
	 * Firmware match check is relevant only for CNA.
	 */
	if (!ioc->cna)
		return BFA_TRUE;

	/**
	 * If BIOS boot (flash based), do not increment usage count.
	 */
	if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);

	/**
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/**
	 * Use count cannot be non-zero while the chip is in uninitialized state.
	 */
	bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);

	/**
	 * Check if another driver with a different firmware is active.
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/**
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}
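
/**
 * Release the firmware usage count taken in bfa_ioc_ct_firmware_lock().
 */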
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32 usecnt;

	/**
	 * Firmware lock is relevant only for CNA.
	 */
	if (!ioc->cna)
		return;

	/**
	 * If BIOS boot (flash based), do not decrement usage count.
	 */
	if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ)
		return;

	/**
	 * Decrement usage count.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
	bfa_assert(usecnt > 0);

	usecnt--;
	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
	bfa_trc(ioc, usecnt);

	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/**
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
{
	if (ioc->cna) {
		bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
		/* Wait for halt to take effect */
		bfa_reg_read(ioc->ioc_regs.ll_halt);
	} else {
		bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
		bfa_reg_read(ioc->ioc_regs.err_set);
	}
}

/**
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};
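
/**
 * Initialize the per-function IOC register addresses from the BAR0 base,
 * the PCI function number and the port id.
 */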
static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t rb;
	int pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);

	/**
	 * SRAM memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * Err set reg: for notification of HB failure in FC mode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/**
 * Initialize IOC to port mapping.
 */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/**
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = bfa_reg_read(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

/**
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
	u32 r32, mode;

	r32 = bfa_reg_read(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/**
	 * If already in desired mode, do not change anything
	 */
	if (!msix && mode)
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	bfa_reg_write(rb + FNC_PERS_REG, r32);
}
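
/**
 * Set up the application PLLs, clear the LMEM/PMM resets and run the EDRAM
 * memory BIST. The chip init semaphore is held for the whole sequence.
 */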
static bfa_status_t
bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
	u32 pll_sclk, pll_fclk, r32;

	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
		__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
		__APP_PLL_312_JITLMT0_1(3U) |
		__APP_PLL_312_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
		__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
		__APP_PLL_425_JITLMT0_1(3U) |
		__APP_PLL_425_CNTLMT0_1(1U);

	/**
	 * For catapult, choose operational mode FC/FCoE
	 */
	if (ioc->fcmode) {
		bfa_reg_write((rb + OP_MODE), 0);
		bfa_reg_write((rb + ETH_MAC_SER_REG),
				__APP_EMS_CMLCKSEL |
				__APP_EMS_REFCKBUFEN2 |
				__APP_EMS_CHANNEL_SEL);
	} else {
		ioc->pllinit = BFA_TRUE;
		bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
		bfa_reg_write((rb + ETH_MAC_SER_REG),
				__APP_EMS_REFCKBUFEN1);
	}
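
	/*
	 * Place both IOCs in the UNINIT state and mask/clear host function
	 * interrupts before reprogramming the PLLs.
	 */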
	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);

	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);

	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
		__APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
		__APP_PLL_425_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
		__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
		__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);

	/**
	 * Wait for PLLs to lock.
	 */
	bfa_reg_read(rb + HOSTFN0_INT_MSK);
	bfa_os_udelay(2000);
	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);

	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
		__APP_PLL_312_ENABLE);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
		__APP_PLL_425_ENABLE);

	/**
	 * PSS memory reset is asserted at power-on-reset. Need to clear
	 * this before running EDRAM BISTR
	 */
	if (ioc->cna) {
		bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
		bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
	}

	r32 = bfa_reg_read((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	bfa_reg_write((rb + PSS_CTL_REG), r32);
	bfa_os_udelay(1000);

	if (ioc->cna) {
		bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
		bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
	}
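
	/*
	 * Run the EDRAM built-in self test and trace the result.
	 */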
	bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
	bfa_os_udelay(1000);
	r32 = bfa_reg_read((rb + MBIST_STAT_REG));
	bfa_trc(ioc, r32);

	/**
	 * Clear BISTR
	 */
	bfa_reg_write((rb + MBIST_CTL_REG), 0);

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/**
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
	if (ioc->cna) {
		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	}

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	bfa_ioc_hw_sem_release(ioc);
}