/* bfa_ioc_ct.c */
  1. /*
  2. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include "bfad_drv.h"
  18. #include "bfa_ioc.h"
  19. #include "bfi_ctreg.h"
  20. #include "bfa_defs.h"
BFA_TRC_FILE(CNA, IOC_CT);

/*
 * Per-function failure-sync bookkeeping lives in one 32-bit register
 * (ioc_fail_sync): the low 16 bits are per-PCI-function "sync acked"
 * flags, the high 16 bits the corresponding "sync required" flags.
 * Each PCI function owns the bit at position bfa_ioc_pcifn() in each half.
 */
#define bfa_ioc_ct_sync_pos(__ioc)		\
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH	16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);

/* Catapult (CT) ASIC hardware-interface vtable, filled in at attach time. */
static struct bfa_ioc_hwif_s hwif_ct;
  45. /*
  46. * Called from bfa_ioc_attach() to map asic specific calls.
  47. */
  48. void
  49. bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
  50. {
  51. hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
  52. hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
  53. hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
  54. hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
  55. hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
  56. hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
  57. hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
  58. hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
  59. hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
  60. hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
  61. hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
  62. hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
  63. ioc->ioc_hwif = &hwif_ct;
  64. }
  65. /*
  66. * Return true if firmware of current driver matches the running firmware.
  67. */
/*
 * Return true if firmware of current driver matches the running firmware.
 * On success (and for the non-CNA / flash-boot early exits) the caller may
 * proceed with this firmware; a BFA_FALSE return means another function is
 * running a different firmware image.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	/*
	 * Firmware match check is relevant only for CNA.
	 */
	if (!ioc->cna)
		return BFA_TRUE;

	/*
	 * If bios boot (flash based) -- do not increment usage count
	 */
	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	/* Serialize access to the shared usage count via the HW semaphore. */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 * We are the first user: take the first reference and clear any
	 * stale failure-sync state.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		/* writing 1 releases the HW semaphore */
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/*
	 * Use count cannot be non-zero and chip in uninitialized state.
	 */
	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		/* mismatch: release semaphore without taking a reference */
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}
  121. static void
  122. bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
  123. {
  124. u32 usecnt;
  125. /*
  126. * Firmware lock is relevant only for CNA.
  127. */
  128. if (!ioc->cna)
  129. return;
  130. /*
  131. * If bios boot (flash based) -- do not decrement usage count
  132. */
  133. if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
  134. BFA_IOC_FWIMG_MINSZ)
  135. return;
  136. /*
  137. * decrement usage count
  138. */
  139. bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  140. usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
  141. WARN_ON(usecnt <= 0);
  142. usecnt--;
  143. writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
  144. bfa_trc(ioc, usecnt);
  145. writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
  146. }
  147. /*
  148. * Notify other functions on HB failure.
  149. */
/*
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
	if (ioc->cna) {
		/* halt firmware on both ports */
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		/* FC mode: raise the PSS error interrupt instead */
		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
		/* readback flushes the posted write */
		readl(ioc->ioc_regs.err_set);
	}
}
/*
 * Host to LPU mailbox message addresses
 * (indexed by PCI function number, 0..3)
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
/*
 * Host <-> LPU mailbox command/status registers - port 0
 * (indexed by PCI function number, 0..3)
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};
/*
 * Host <-> LPU mailbox command/status registers - port 1
 * (indexed by PCI function number, 0..3)
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};
/*
 * Compute the per-function/per-port register addresses for this IOC,
 * all relative to BAR0. Must run before any other register access.
 */
static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int	pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	/* per-PCI-function mailbox and page-number registers */
	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

	/*
	 * Port-dependent registers. "alt_*" always point at the OTHER
	 * port, so a failing IOC can signal/observe its peer.
	 */
	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/*
 * Initialize IOC to port mapping.
 */

/* Each PCI function has an 8-bit field in the personality register. */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	/* extract this function's port-map bits from its 8-bit field */
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}
/*
 * Set interrupt mode for a function: INTX or MSIX
 * Read-modify-write of this function's field in the shared FNC_PERS
 * register.
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 * (NOTE(review): this short-circuit only fires for the INTX
	 * request case; an MSIX request always rewrites the register.)
	 */
	if (!msix && mode)
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	/* clear this function's status field, then set the new mode */
	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	writel(r32, rb + FNC_PERS_REG);
}
/*
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{

	if (ioc->cna) {
		/* zero the shared usage count under the usage semaphore */
		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_usage_reg);
		/* writing 1 releases the usage semaphore */
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	}

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
  305. /*
  306. * Synchronized IOC failure processing routines
  307. */
  308. static void
  309. bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
  310. {
  311. uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  312. uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
  313. writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
  314. }
  315. static void
  316. bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
  317. {
  318. uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  319. uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
  320. bfa_ioc_ct_sync_pos(ioc);
  321. writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
  322. }
  323. static void
  324. bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
  325. {
  326. uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  327. writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
  328. ioc->ioc_regs.ioc_fail_sync);
  329. }
/*
 * Return BFA_TRUE when every function that requires failure sync has
 * acknowledged it (so reinitialization may proceed); BFA_FALSE otherwise.
 */
static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	uint32_t tmp_ackd;

	/* no outstanding acks at all: nothing to wait for */
	if (sync_ackd == 0)
		return BFA_TRUE;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		/* all participants acked: clear ack bits, fail both IOCs */
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC need to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return BFA_FALSE;
}
  365. /*
  366. * Check the firmware state to know if pll_init has been completed already
  367. */
  368. bfa_boolean_t
  369. bfa_ioc_ct_pll_init_complete(void __iomem *rb)
  370. {
  371. if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
  372. (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
  373. return BFA_TRUE;
  374. return BFA_FALSE;
  375. }
/*
 * Bring up the ASIC PLLs and on-chip memory. The write/delay sequence
 * below follows the hardware programming order and must not be reordered.
 * @rb:     BAR0 base
 * @fcmode: BFA_TRUE for FC personality, else FCoE/Ethernet
 * Returns BFA_STATUS_OK (no failure paths in this sequence).
 */
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
{
	u32	pll_sclk, pll_fclk, r32;

	/* slow (312) and fast (425) PLL configuration words */
	pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
		__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
		__APP_PLL_312_JITLMT0_1(3U) |
		__APP_PLL_312_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
		__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
		__APP_PLL_425_JITLMT0_1(3U) |
		__APP_PLL_425_CNTLMT0_1(1U);

	/* select operating mode and ethernet MAC serdes clocking */
	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
			 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}

	/* force both IOCs back to uninitialized state */
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));

	/* mask and clear all host-function interrupts */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

	/* assert logic soft reset on both PLLs, then enable them */
	writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
		rb + APP_PLL_425_CTL_REG);
	writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
		rb + APP_PLL_425_CTL_REG);

	/* flush posted writes, then wait for the PLLs to lock */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));

	/* release logic soft reset, PLLs remain enabled */
	writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);

	/* in FCoE/Ethernet mode, reset the 1T memory blocks on both ports */
	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}

	/* take local memory (LMEM) out of reset */
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/*
	 * Run the EDRAM memory BIST. The status read result is discarded;
	 * NOTE(review): failures are apparently ignored here -- confirm
	 * this is intentional.
	 */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
  435. }