bfa_ioc_ct.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488
  1. /*
  2. * Linux network driver for Brocade Converged Network Adapter.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License (GPL) Version 2 as
  6. * published by the Free Software Foundation
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. /*
  14. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  15. * All rights reserved
  16. * www.brocade.com
  17. */
  18. #include "bfa_ioc.h"
  19. #include "cna.h"
  20. #include "bfi.h"
  21. #include "bfi_ctreg.h"
  22. #include "bfa_defs.h"
/*
 * Failure-sync register layout: one bit per PCI function.
 * Low 16 bits  = "ackd"  -- functions that have acknowledged an IOC failure.
 * High 16 bits = "reqd"  -- functions that require the failure sync.
 */
#define bfa_ioc_ct_sync_pos(__ioc) \
	((u32) (1 << bfa_ioc_pcifn(__ioc)))	/* this function's ack bit */
#define BFA_IOC_SYNC_REQD_SH 16			/* shift of the "reqd" half */
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
	(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)	/* this function's reqd bit */
  31. /*
  32. * forward declarations
  33. */
  34. static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
  35. static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
  36. static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
  37. static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
  38. static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
  39. static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
  40. static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
  41. static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
  42. static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
  43. static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
  44. static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
  45. static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
  46. static struct bfa_ioc_hwif nw_hwif_ct;
  47. /**
  48. * Called from bfa_ioc_attach() to map asic specific calls.
  49. */
  50. void
  51. bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
  52. {
  53. nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
  54. nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
  55. nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
  56. nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
  57. nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
  58. nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
  59. nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
  60. nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
  61. nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
  62. nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
  63. nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
  64. nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
  65. ioc->ioc_hwif = &nw_hwif_ct;
  66. }
  67. /**
  68. * Return true if firmware of current driver matches the running firmware.
  69. */
  70. static bool
  71. bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
  72. {
  73. enum bfi_ioc_state ioc_fwstate;
  74. u32 usecnt;
  75. struct bfi_ioc_image_hdr fwhdr;
  76. /**
  77. * Firmware match check is relevant only for CNA.
  78. */
  79. if (!ioc->cna)
  80. return true;
  81. /**
  82. * If bios boot (flash based) -- do not increment usage count
  83. */
  84. if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
  85. BFA_IOC_FWIMG_MINSZ)
  86. return true;
  87. bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  88. usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
  89. /**
  90. * If usage count is 0, always return TRUE.
  91. */
  92. if (usecnt == 0) {
  93. writel(1, ioc->ioc_regs.ioc_usage_reg);
  94. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  95. writel(0, ioc->ioc_regs.ioc_fail_sync);
  96. return true;
  97. }
  98. ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
  99. /**
  100. * Use count cannot be non-zero and chip in uninitialized state.
  101. */
  102. BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));
  103. /**
  104. * Check if another driver with a different firmware is active
  105. */
  106. bfa_nw_ioc_fwver_get(ioc, &fwhdr);
  107. if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
  108. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  109. return false;
  110. }
  111. /**
  112. * Same firmware version. Increment the reference count.
  113. */
  114. usecnt++;
  115. writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
  116. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  117. return true;
  118. }
  119. static void
  120. bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
  121. {
  122. u32 usecnt;
  123. /**
  124. * Firmware lock is relevant only for CNA.
  125. */
  126. if (!ioc->cna)
  127. return;
  128. /**
  129. * If bios boot (flash based) -- do not decrement usage count
  130. */
  131. if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
  132. BFA_IOC_FWIMG_MINSZ)
  133. return;
  134. /**
  135. * decrement usage count
  136. */
  137. bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  138. usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
  139. BUG_ON(!(usecnt > 0));
  140. usecnt--;
  141. writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
  142. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  143. }
  144. /**
  145. * Notify other functions on HB failure.
  146. */
  147. static void
  148. bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
  149. {
  150. if (ioc->cna) {
  151. writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
  152. writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
  153. /* Wait for halt to take effect */
  154. readl(ioc->ioc_regs.ll_halt);
  155. readl(ioc->ioc_regs.alt_ll_halt);
  156. } else {
  157. writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
  158. readl(ioc->ioc_regs.err_set);
  159. }
  160. }
/**
 * Host to LPU mailbox message addresses, indexed by PCI function number.
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 * (indexed by PCI function number)
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 * (indexed by PCI function number)
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};
/**
 * Fill in ioc->ioc_regs with BAR0-relative addresses of all registers
 * used by the IOC code: mailboxes (selected by PCI function), heartbeat
 * and fwstate/halt registers (selected by port id), PLL control,
 * semaphores, usage counter and SMEM access.
 */
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	/* Host <-> LPU mailbox and host page number, per PCI function */
	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

	/* the "alt_*" registers always refer to the other port's IOC */
	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
  239. /**
  240. * Initialize IOC to port mapping.
  241. */
  242. #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
  243. static void
  244. bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
  245. {
  246. void __iomem *rb = ioc->pcidev.pci_bar_kva;
  247. u32 r32;
  248. /**
  249. * For catapult, base port id on personality register and IOC type
  250. */
  251. r32 = readl(rb + FNC_PERS_REG);
  252. r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
  253. ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
  254. }
  255. /**
  256. * Set interrupt mode for a function: INTX or MSIX
  257. */
  258. static void
  259. bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
  260. {
  261. void __iomem *rb = ioc->pcidev.pci_bar_kva;
  262. u32 r32, mode;
  263. r32 = readl(rb + FNC_PERS_REG);
  264. mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
  265. __F0_INTX_STATUS;
  266. /**
  267. * If already in desired mode, do not change anything
  268. */
  269. if (!msix && mode)
  270. return;
  271. if (msix)
  272. mode = __F0_INTX_STATUS_MSIX;
  273. else
  274. mode = __F0_INTX_STATUS_INTA;
  275. r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
  276. r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
  277. writel(r32, rb + FNC_PERS_REG);
  278. }
  279. /**
  280. * Cleanup hw semaphore and usecnt registers
  281. */
  282. static void
  283. bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
  284. {
  285. if (ioc->cna) {
  286. bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  287. writel(0, ioc->ioc_regs.ioc_usage_reg);
  288. bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
  289. }
  290. /*
  291. * Read the hw sem reg to make sure that it is locked
  292. * before we clear it. If it is not locked, writing 1
  293. * will lock it instead of clearing it.
  294. */
  295. readl(ioc->ioc_regs.ioc_sem_reg);
  296. bfa_nw_ioc_hw_sem_release(ioc);
  297. }
  298. /**
  299. * Synchronized IOC failure processing routines
  300. */
  301. static void
  302. bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
  303. {
  304. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  305. u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
  306. writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
  307. }
  308. static void
  309. bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
  310. {
  311. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  312. u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
  313. bfa_ioc_ct_sync_pos(ioc);
  314. writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
  315. }
  316. static void
  317. bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
  318. {
  319. u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  320. writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
  321. }
/*
 * Return true once every PCI function that required the failure sync
 * (high half of ioc_fail_sync) has acknowledged it (low half), then
 * reset the ack bits and mark both IOCs failed so reinitialization
 * can proceed.
 */
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	/* no acks outstanding -- sync already complete */
	if (sync_ackd == 0)
		return true;

	/**
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
			!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		/* all requesters have acked: clear acks, fail both IOCs */
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
				ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/**
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC need to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}
/*
 * Bring up the Catapult ASIC PLLs and memory.
 *
 * Sequence: program operating mode, force both IOCs to UNINIT, mask and
 * clear host interrupts, reset + enable the slow (312) and fast (425)
 * PLLs, release LMEM reset, then run the eDRAM built-in self test.
 * The writel/udelay ordering is hardware-mandated; do not reorder.
 * Always returns BFA_STATUS_OK.
 */
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
{
	u32 pll_sclk, pll_fclk, r32;

	/* slow (312MHz) and fast (425MHz) PLL base configuration */
	pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
		__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
		__APP_PLL_312_JITLMT0_1(3U) |
		__APP_PLL_312_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
		__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
		__APP_PLL_425_JITLMT0_1(3U) |
		__APP_PLL_425_CNTLMT0_1(1U);

	/* select FC vs Ethernet operating mode and MAC serdes config */
	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
				__APP_EMS_REFCKBUFEN2 |
				__APP_EMS_CHANNEL_SEL,
				(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
				(rb + ETH_MAC_SER_REG));
	}

	/* force both IOC state machines back to UNINIT */
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));

	/* mask and clear all host-function interrupts */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

	/* assert PLL logic soft reset, then enable while still in reset */
	writel(pll_sclk |
		__APP_PLL_312_LOGIC_SOFT_RESET,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_425_LOGIC_SOFT_RESET,
		rb + APP_PLL_425_CTL_REG);
	writel(pll_sclk |
		__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
		rb + APP_PLL_425_CTL_REG);

	/* flush posted writes, then let the PLLs lock */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));

	/* release the logic soft reset, PLLs stay enabled */
	writel(pll_sclk |
		__APP_PLL_312_ENABLE,
		rb + APP_PLL_312_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_425_ENABLE,
		rb + APP_PLL_425_CTL_REG);

	/* Ethernet mode: reset the per-port 1T memories */
	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}

	/* take LMEM out of reset */
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/* run the eDRAM memory BIST, then stop it */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	/*
	 * NOTE(review): BIST status read result is discarded -- presumably
	 * the read itself latches/clears status; verify against ASIC docs.
	 */
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
  427. }