bfa_ioc_ct.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516
  1. /*
  2. * Linux network driver for Brocade Converged Network Adapter.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License (GPL) Version 2 as
  6. * published by the Free Software Foundation
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. /*
  14. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  15. * All rights reserved
  16. * www.brocade.com
  17. */
  18. #include "bfa_ioc.h"
  19. #include "cna.h"
  20. #include "bfi.h"
  21. #include "bfi_ctreg.h"
  22. #include "bfa_defs.h"
/*
 * The ioc_fail_sync register is split into two 16-bit bitmask halves,
 * one bit per PCI function:
 *   - low 16 bits:  "sync acked" bitmask
 *   - high 16 bits: "sync required" bitmask
 */
#define bfa_ioc_ct_sync_pos(__ioc)	\
		((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
  31. /*
  32. * forward declarations
  33. */
  34. static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
  35. static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
  36. static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
  37. static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
  38. static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
  39. static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
  40. static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
  41. static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
  42. static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
  43. static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
  44. static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
  45. static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
  46. static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
  47. static struct bfa_ioc_hwif nw_hwif_ct;
  48. /**
  49. * Called from bfa_ioc_attach() to map asic specific calls.
  50. */
  51. void
  52. bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
  53. {
  54. nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
  55. nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
  56. nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
  57. nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
  58. nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
  59. nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
  60. nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
  61. nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
  62. nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
  63. nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
  64. nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
  65. nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
  66. nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
  67. ioc->ioc_hwif = &nw_hwif_ct;
  68. }
  69. /**
  70. * Return true if firmware of current driver matches the running firmware.
  71. */
  72. static bool
  73. bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
  74. {
  75. enum bfi_ioc_state ioc_fwstate;
  76. u32 usecnt;
  77. struct bfi_ioc_image_hdr fwhdr;
  78. /**
  79. * Firmware match check is relevant only for CNA.
  80. */
  81. if (!ioc->cna)
  82. return true;
  83. /**
  84. * If bios boot (flash based) -- do not increment usage count
  85. */
  86. if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
  87. BFA_IOC_FWIMG_MINSZ)
  88. return true;
  89. bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  90. usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
  91. /**
  92. * If usage count is 0, always return TRUE.
  93. */
  94. if (usecnt == 0) {
  95. writel(1, ioc->ioc_regs.ioc_usage_reg);
  96. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  97. writel(0, ioc->ioc_regs.ioc_fail_sync);
  98. return true;
  99. }
  100. ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
  101. /**
  102. * Use count cannot be non-zero and chip in uninitialized state.
  103. */
  104. BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));
  105. /**
  106. * Check if another driver with a different firmware is active
  107. */
  108. bfa_nw_ioc_fwver_get(ioc, &fwhdr);
  109. if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
  110. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  111. return false;
  112. }
  113. /**
  114. * Same firmware version. Increment the reference count.
  115. */
  116. usecnt++;
  117. writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
  118. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  119. return true;
  120. }
  121. static void
  122. bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
  123. {
  124. u32 usecnt;
  125. /**
  126. * Firmware lock is relevant only for CNA.
  127. */
  128. if (!ioc->cna)
  129. return;
  130. /**
  131. * If bios boot (flash based) -- do not decrement usage count
  132. */
  133. if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
  134. BFA_IOC_FWIMG_MINSZ)
  135. return;
  136. /**
  137. * decrement usage count
  138. */
  139. bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  140. usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
  141. BUG_ON(!(usecnt > 0));
  142. usecnt--;
  143. writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
  144. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  145. }
/**
 * Notify other functions on HB failure.
 *
 * In CNA mode both local-link halt registers are written so the peer
 * port sees the failure; in FC mode the PSS error-status bit is set
 * instead (see err_set setup in bfa_ioc_ct_reg_init()). The trailing
 * readl()s flush the posted MMIO writes so the halt takes effect
 * before this function returns.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	if (ioc->cna) {
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}
/**
 * Host to LPU mailbox message addresses, indexed by PCI function number
 * (see bfa_ioc_ct_reg_init()).
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};
/**
 * Compute the BAR0-relative addresses of all IOC registers for this
 * PCI function / port and cache them in ioc->ioc_regs.
 *
 * Mailbox registers are selected per PCI function from the tables
 * above; heartbeat, fwstate and halt registers are selected per port,
 * with the "alt_*" entries pointing at the other port's registers.
 */
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	/* Per-PCI-function mailbox and host page number registers */
	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/**
 * Initialize IOC to port mapping.
 */

/* Each PCI function owns an 8-bit field in the personality register. */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)

static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/*
	 * For catapult, base port id on personality register and IOC type:
	 * shift this function's field down, then extract the port-map bits.
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}
/**
 * Set interrupt mode for a function: INTX or MSIX
 *
 * Read-modify-write of this function's field in the shared FNC_PERS
 * register. NOTE(review): the early-return only covers the
 * "INTX requested and already INTX" case; an MSIX request always
 * rewrites the register (harmless if already MSIX) -- confirm intended.
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32, mode;

	/* Extract current interrupt-mode bits for this PCI function. */
	r32 = readl(rb + FNC_PERS_REG);
	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if (!msix && mode)
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	/* Clear this function's field, then install the new mode. */
	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	writel(r32, rb + FNC_PERS_REG);
}
/**
 * Cleanup hw semaphore and usecnt registers
 *
 * Forces the shared firmware usage count back to zero (CNA only) and
 * releases the main IOC hardware semaphore.
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	if (ioc->cna) {
		bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	}

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}
/**
 * Synchronized IOC failure processing routines
 *
 * Returns true when this function may (re)initialize the IOC: either
 * it had to clean up a stale sync-required bit from a previous unclean
 * exit, or the normal sync-complete check passes.
 */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		/* Reset sync state, usage count, and both fw states. */
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
  323. /**
  324. * Synchronized IOC failure processing routines
  325. */
  326. static void
  327. bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
  328. {
  329. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  330. u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
  331. writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
  332. }
  333. static void
  334. bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
  335. {
  336. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  337. u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
  338. bfa_ioc_ct_sync_pos(ioc);
  339. writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
  340. }
  341. static void
  342. bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
  343. {
  344. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  345. writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
  346. }
/*
 * Returns true when all PCI functions that required a sync have acked
 * it; in that case the sync-acked bits are cleared and both fw states
 * are moved to BFI_IOC_FAIL so reinitialization can proceed.
 */
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	/* No acks outstanding at all: nothing to wait for. */
	if (sync_ackd == 0)
		return true;

	/**
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		/* All participants have acked: clear acks, mark failure. */
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
				ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/**
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC need to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}
/*
 * Bring up the ASIC PLLs and local memory. The write/delay sequence
 * below is a hardware initialization recipe; statement order matters.
 * Always returns BFA_STATUS_OK.
 */
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
{
	u32 pll_sclk, pll_fclk, r32;

	/* Slow (312) and fast (425) PLL base configuration words. */
	pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
		__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
		__APP_PLL_312_JITLMT0_1(3U) |
		__APP_PLL_312_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
		__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
		__APP_PLL_425_JITLMT0_1(3U) |
		__APP_PLL_425_CNTLMT0_1(1U);

	/* Select FC vs FCoE operating mode and serdes reference clock. */
	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
				__APP_EMS_REFCKBUFEN2 |
				__APP_EMS_CHANNEL_SEL,
				(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
				(rb + ETH_MAC_SER_REG));
	}

	/* Start both IOCs from UNINIT. */
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));

	/* Mask and clear all host-function interrupts during PLL setup. */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

	/* Assert logic soft reset on both PLLs, then enable them. */
	writel(pll_sclk |
		__APP_PLL_312_LOGIC_SOFT_RESET,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_425_LOGIC_SOFT_RESET,
		rb + APP_PLL_425_CTL_REG);
	writel(pll_sclk |
		__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
		rb + APP_PLL_425_CTL_REG);

	/* Flush posted writes, then wait for the PLLs to lock. */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));

	/* Release logic soft reset, keep PLLs enabled. */
	writel(pll_sclk |
		__APP_PLL_312_ENABLE,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_425_ENABLE,
		rb + APP_PLL_425_CTL_REG);

	/* In FCoE mode, pulse the per-port 1T memory resets. */
	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}

	/* Take local memory out of reset. */
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/*
	 * Run the embedded-DRAM built-in self test, then stop it.
	 * NOTE(review): the MBIST status read result is not checked --
	 * presumably the read itself is required to latch/clear state;
	 * confirm against the ASIC programming guide.
	 */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}