/* bfa_ioc_ct.c - Brocade CNA IOC hardware interface, Catapult (CT) ASIC */
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
  18. #include "bfa_ioc.h"
  19. #include "cna.h"
  20. #include "bfi.h"
  21. #include "bfi_reg.h"
  22. #include "bfa_defs.h"
  23. #define bfa_ioc_ct_sync_pos(__ioc) \
  24. ((u32) (1 << bfa_ioc_pcifn(__ioc)))
  25. #define BFA_IOC_SYNC_REQD_SH 16
  26. #define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
  27. #define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
  28. #define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
  29. #define bfa_ioc_ct_sync_reqd_pos(__ioc) \
  30. (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
  31. /*
  32. * forward declarations
  33. */
  34. static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
  35. static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
  36. static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
  37. static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
  38. static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
  39. static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
  40. static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
  41. static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
  42. static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
  43. static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
  44. static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
  45. static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
  46. static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
  47. enum bfi_asic_mode asic_mode);
  48. static const struct bfa_ioc_hwif nw_hwif_ct = {
  49. .ioc_pll_init = bfa_ioc_ct_pll_init,
  50. .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
  51. .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
  52. .ioc_reg_init = bfa_ioc_ct_reg_init,
  53. .ioc_map_port = bfa_ioc_ct_map_port,
  54. .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
  55. .ioc_notify_fail = bfa_ioc_ct_notify_fail,
  56. .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
  57. .ioc_sync_start = bfa_ioc_ct_sync_start,
  58. .ioc_sync_join = bfa_ioc_ct_sync_join,
  59. .ioc_sync_leave = bfa_ioc_ct_sync_leave,
  60. .ioc_sync_ack = bfa_ioc_ct_sync_ack,
  61. .ioc_sync_complete = bfa_ioc_ct_sync_complete,
  62. };
  63. /**
  64. * Called from bfa_ioc_attach() to map asic specific calls.
  65. */
  66. void
  67. bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
  68. {
  69. ioc->ioc_hwif = &nw_hwif_ct;
  70. }
  71. /**
  72. * Return true if firmware of current driver matches the running firmware.
  73. */
  74. static bool
  75. bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
  76. {
  77. enum bfi_ioc_state ioc_fwstate;
  78. u32 usecnt;
  79. struct bfi_ioc_image_hdr fwhdr;
  80. /**
  81. * If bios boot (flash based) -- do not increment usage count
  82. */
  83. if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
  84. BFA_IOC_FWIMG_MINSZ)
  85. return true;
  86. bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  87. usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
  88. /**
  89. * If usage count is 0, always return TRUE.
  90. */
  91. if (usecnt == 0) {
  92. writel(1, ioc->ioc_regs.ioc_usage_reg);
  93. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  94. writel(0, ioc->ioc_regs.ioc_fail_sync);
  95. return true;
  96. }
  97. ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
  98. /**
  99. * Use count cannot be non-zero and chip in uninitialized state.
  100. */
  101. BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));
  102. /**
  103. * Check if another driver with a different firmware is active
  104. */
  105. bfa_nw_ioc_fwver_get(ioc, &fwhdr);
  106. if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
  107. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  108. return false;
  109. }
  110. /**
  111. * Same firmware version. Increment the reference count.
  112. */
  113. usecnt++;
  114. writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
  115. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  116. return true;
  117. }
  118. static void
  119. bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
  120. {
  121. u32 usecnt;
  122. /**
  123. * If bios boot (flash based) -- do not decrement usage count
  124. */
  125. if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
  126. BFA_IOC_FWIMG_MINSZ)
  127. return;
  128. /**
  129. * decrement usage count
  130. */
  131. bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  132. usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
  133. BUG_ON(!(usecnt > 0));
  134. usecnt--;
  135. writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
  136. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  137. }
  138. /**
  139. * Notify other functions on HB failure.
  140. */
  141. static void
  142. bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
  143. {
  144. writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
  145. writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
  146. /* Wait for halt to take effect */
  147. readl(ioc->ioc_regs.ll_halt);
  148. readl(ioc->ioc_regs.alt_ll_halt);
  149. }
  150. /**
  151. * Host to LPU mailbox message addresses
  152. */
  153. static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
  154. { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
  155. { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
  156. { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
  157. { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
  158. };
  159. /**
  160. * Host <-> LPU mailbox command/status registers - port 0
  161. */
  162. static struct { u32 hfn, lpu; } ct_p0reg[] = {
  163. { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
  164. { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
  165. { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
  166. { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
  167. };
  168. /**
  169. * Host <-> LPU mailbox command/status registers - port 1
  170. */
  171. static struct { u32 hfn, lpu; } ct_p1reg[] = {
  172. { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
  173. { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
  174. { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
  175. { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
  176. };
  177. static void
  178. bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
  179. {
  180. void __iomem *rb;
  181. int pcifn = bfa_ioc_pcifn(ioc);
  182. rb = bfa_ioc_bar0(ioc);
  183. ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
  184. ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
  185. ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
  186. if (ioc->port_id == 0) {
  187. ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
  188. ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
  189. ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
  190. ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
  191. ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
  192. ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
  193. ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
  194. } else {
  195. ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
  196. ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
  197. ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
  198. ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
  199. ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
  200. ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
  201. ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
  202. }
  203. /*
  204. * PSS control registers
  205. */
  206. ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
  207. ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
  208. ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
  209. ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
  210. /*
  211. * IOC semaphore registers and serialization
  212. */
  213. ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
  214. ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
  215. ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
  216. ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
  217. ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
  218. /**
  219. * sram memory access
  220. */
  221. ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
  222. ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
  223. /*
  224. * err set reg : for notification of hb failure in fcmode
  225. */
  226. ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
  227. }
  228. /**
  229. * Initialize IOC to port mapping.
  230. */
  231. #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
  232. static void
  233. bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
  234. {
  235. void __iomem *rb = ioc->pcidev.pci_bar_kva;
  236. u32 r32;
  237. /**
  238. * For catapult, base port id on personality register and IOC type
  239. */
  240. r32 = readl(rb + FNC_PERS_REG);
  241. r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
  242. ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
  243. }
  244. /**
  245. * Set interrupt mode for a function: INTX or MSIX
  246. */
  247. static void
  248. bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
  249. {
  250. void __iomem *rb = ioc->pcidev.pci_bar_kva;
  251. u32 r32, mode;
  252. r32 = readl(rb + FNC_PERS_REG);
  253. mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
  254. __F0_INTX_STATUS;
  255. /**
  256. * If already in desired mode, do not change anything
  257. */
  258. if ((!msix && mode) || (msix && !mode))
  259. return;
  260. if (msix)
  261. mode = __F0_INTX_STATUS_MSIX;
  262. else
  263. mode = __F0_INTX_STATUS_INTA;
  264. r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
  265. r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
  266. writel(r32, rb + FNC_PERS_REG);
  267. }
  268. /**
  269. * Cleanup hw semaphore and usecnt registers
  270. */
  271. static void
  272. bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
  273. {
  274. bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  275. writel(0, ioc->ioc_regs.ioc_usage_reg);
  276. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  277. /*
  278. * Read the hw sem reg to make sure that it is locked
  279. * before we clear it. If it is not locked, writing 1
  280. * will lock it instead of clearing it.
  281. */
  282. readl(ioc->ioc_regs.ioc_sem_reg);
  283. bfa_nw_ioc_hw_sem_release(ioc);
  284. }
  285. /**
  286. * Synchronized IOC failure processing routines
  287. */
  288. static bool
  289. bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
  290. {
  291. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  292. u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
  293. /*
  294. * Driver load time. If the sync required bit for this PCI fn
  295. * is set, it is due to an unclean exit by the driver for this
  296. * PCI fn in the previous incarnation. Whoever comes here first
  297. * should clean it up, no matter which PCI fn.
  298. */
  299. if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
  300. writel(0, ioc->ioc_regs.ioc_fail_sync);
  301. writel(1, ioc->ioc_regs.ioc_usage_reg);
  302. writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
  303. writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
  304. return true;
  305. }
  306. return bfa_ioc_ct_sync_complete(ioc);
  307. }
  308. /**
  309. * Synchronized IOC failure processing routines
  310. */
  311. static void
  312. bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
  313. {
  314. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  315. u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
  316. writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
  317. }
  318. static void
  319. bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
  320. {
  321. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  322. u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
  323. bfa_ioc_ct_sync_pos(ioc);
  324. writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
  325. }
  326. static void
  327. bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
  328. {
  329. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  330. writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
  331. }
  332. static bool
  333. bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
  334. {
  335. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  336. u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
  337. u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
  338. u32 tmp_ackd;
  339. if (sync_ackd == 0)
  340. return true;
  341. /**
  342. * The check below is to see whether any other PCI fn
  343. * has reinitialized the ASIC (reset sync_ackd bits)
  344. * and failed again while this IOC was waiting for hw
  345. * semaphore (in bfa_iocpf_sm_semwait()).
  346. */
  347. tmp_ackd = sync_ackd;
  348. if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
  349. !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
  350. sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
  351. if (sync_reqd == sync_ackd) {
  352. writel(bfa_ioc_ct_clear_sync_ackd(r32),
  353. ioc->ioc_regs.ioc_fail_sync);
  354. writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
  355. writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
  356. return true;
  357. }
  358. /**
  359. * If another PCI fn reinitialized and failed again while
  360. * this IOC was waiting for hw sem, the sync_ackd bit for
  361. * this IOC need to be set again to allow reinitialization.
  362. */
  363. if (tmp_ackd != sync_ackd)
  364. writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
  365. return false;
  366. }
  367. static enum bfa_status
  368. bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
  369. {
  370. u32 pll_sclk, pll_fclk, r32;
  371. bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);
  372. pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
  373. __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
  374. __APP_PLL_SCLK_JITLMT0_1(3U) |
  375. __APP_PLL_SCLK_CNTLMT0_1(1U);
  376. pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
  377. __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
  378. __APP_PLL_LCLK_JITLMT0_1(3U) |
  379. __APP_PLL_LCLK_CNTLMT0_1(1U);
  380. if (fcmode) {
  381. writel(0, (rb + OP_MODE));
  382. writel(__APP_EMS_CMLCKSEL |
  383. __APP_EMS_REFCKBUFEN2 |
  384. __APP_EMS_CHANNEL_SEL,
  385. (rb + ETH_MAC_SER_REG));
  386. } else {
  387. writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
  388. writel(__APP_EMS_REFCKBUFEN1,
  389. (rb + ETH_MAC_SER_REG));
  390. }
  391. writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
  392. writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
  393. writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
  394. writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
  395. writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
  396. writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
  397. writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
  398. writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
  399. writel(pll_sclk |
  400. __APP_PLL_SCLK_LOGIC_SOFT_RESET,
  401. rb + APP_PLL_SCLK_CTL_REG);
  402. writel(pll_fclk |
  403. __APP_PLL_LCLK_LOGIC_SOFT_RESET,
  404. rb + APP_PLL_LCLK_CTL_REG);
  405. writel(pll_sclk |
  406. __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
  407. rb + APP_PLL_SCLK_CTL_REG);
  408. writel(pll_fclk |
  409. __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
  410. rb + APP_PLL_LCLK_CTL_REG);
  411. readl(rb + HOSTFN0_INT_MSK);
  412. udelay(2000);
  413. writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
  414. writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
  415. writel(pll_sclk |
  416. __APP_PLL_SCLK_ENABLE,
  417. rb + APP_PLL_SCLK_CTL_REG);
  418. writel(pll_fclk |
  419. __APP_PLL_LCLK_ENABLE,
  420. rb + APP_PLL_LCLK_CTL_REG);
  421. if (!fcmode) {
  422. writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
  423. writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
  424. }
  425. r32 = readl((rb + PSS_CTL_REG));
  426. r32 &= ~__PSS_LMEM_RESET;
  427. writel(r32, (rb + PSS_CTL_REG));
  428. udelay(1000);
  429. if (!fcmode) {
  430. writel(0, (rb + PMM_1T_RESET_REG_P0));
  431. writel(0, (rb + PMM_1T_RESET_REG_P1));
  432. }
  433. writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
  434. udelay(1000);
  435. r32 = readl((rb + MBIST_STAT_REG));
  436. writel(0, (rb + MBIST_CTL_REG));
  437. return BFA_STATUS_OK;
  438. }