bfa_ioc_ct.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951
  1. /*
  2. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include "bfad_drv.h"
  18. #include "bfa_ioc.h"
  19. #include "bfi_reg.h"
  20. #include "bfa_defs.h"
BFA_TRC_FILE(CNA, IOC_CT);

/*
 * Layout of the ioc_fail_sync register: the low 16 bits carry one
 * "sync acked" flag per PCI function, the high 16 bits carry the
 * matching "sync required" flags.
 */
#define bfa_ioc_ct_sync_pos(__ioc)		\
		((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH	16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);

/* hwif vtables, populated by bfa_ioc_set_ct_hwif()/bfa_ioc_set_ct2_hwif() */
static struct bfa_ioc_hwif_s hwif_ct;
static struct bfa_ioc_hwif_s hwif_ct2;
/*
 * Return true if firmware of current driver matches the running firmware.
 *
 * Takes a reference on the shared firmware usage count, all under the
 * hardware usage semaphore. The semaphore is released by reading the
 * semaphore register and then writing 1 to it (see the note in
 * bfa_ioc_ct_ownership_reset() about write-1 semantics).
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/*
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		/* first user: take the initial reference */
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		/* fresh start: no stale failure-sync state */
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/*
	 * Use count cannot be non-zero and chip in uninitialized state.
	 */
	WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/*
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		/* version mismatch: release semaphore, take no reference */
		readl(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/*
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}
  92. static void
  93. bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
  94. {
  95. u32 usecnt;
  96. /*
  97. * decrement usage count
  98. */
  99. bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
  100. usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
  101. WARN_ON(usecnt <= 0);
  102. usecnt--;
  103. writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
  104. bfa_trc(ioc, usecnt);
  105. readl(ioc->ioc_regs.ioc_usage_sem_reg);
  106. writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
  107. }
/*
 * Notify other functions on HB failure.
 *
 * On CNA personalities both logical links are halted; otherwise the
 * error-set register is used. Each write is followed by a read-back so
 * the posted write reaches the hardware before we return.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_is_cna(ioc)) {
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
		writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
		/* Wait for halt to take effect */
		readl(ioc->ioc_regs.ll_halt);
		readl(ioc->ioc_regs.alt_ll_halt);
	} else {
		writel(~0U, ioc->ioc_regs.err_set);
		readl(ioc->ioc_regs.err_set);
	}
}
/*
 * Host to LPU mailbox message addresses, indexed by PCI function.
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

/*
 * CT2 mailbox and command/status registers, indexed by port id.
 */
static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
	ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};
/*
 * Set up the CT (catapult) register map for this IOC: mailbox, state,
 * heartbeat, halt, PLL, semaphore and SMEM register addresses relative
 * to BAR0, selected by PCI function and port id.
 */
static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/*
 * Set up the CT2 register map for this IOC. Unlike CT, the mailbox
 * registers are selected by port id and a per-port LPU read-status
 * register exists.
 */
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
{
	void __iomem *rb;
	int port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/*
 * Initialize IOC to port mapping.
 */

/* Each PCI fn owns an 8-bit field in the personality register. */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}
  280. static void
  281. bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
  282. {
  283. void __iomem *rb = ioc->pcidev.pci_bar_kva;
  284. u32 r32;
  285. r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
  286. ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
  287. bfa_trc(ioc, bfa_ioc_pcifn(ioc));
  288. bfa_trc(ioc, ioc->port_id);
  289. }
/*
 * Set interrupt mode for a function: INTX or MSIX
 *
 * Read-modify-write of this PCI fn's 8-bit field in the shared
 * personality register; other functions' fields are preserved.
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	/* clear this fn's status field, then set the new mode */
	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	writel(r32, rb + FNC_PERS_REG);
}
  316. bfa_boolean_t
  317. bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
  318. {
  319. u32 r32;
  320. r32 = readl(ioc->ioc_regs.lpu_read_stat);
  321. if (r32) {
  322. writel(1, ioc->ioc_regs.lpu_read_stat);
  323. return BFA_TRUE;
  324. }
  325. return BFA_FALSE;
  326. }
/*
 * Cleanup hw semaphore and usecnt registers
 *
 * Forces the usage count and failure-sync state to zero, releases the
 * usage semaphore, and clears the main IOC semaphore.
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{

	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	readl(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(1, ioc->ioc_regs.ioc_usage_sem_reg);

	writel(0, ioc->ioc_regs.ioc_fail_sync);
	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
/*
 * Decide whether this PCI fn may (re)start IOC initialization.
 * Returns BFA_TRUE when initialization can proceed.
 */
static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		/* reset sync state, claim the first usage reference and
		 * force both IOC fw states back to UNINIT */
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
  366. /*
  367. * Synchronized IOC failure processing routines
  368. */
  369. static void
  370. bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
  371. {
  372. uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  373. uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
  374. writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
  375. }
  376. static void
  377. bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
  378. {
  379. uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  380. uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
  381. bfa_ioc_ct_sync_pos(ioc);
  382. writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
  383. }
  384. static void
  385. bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
  386. {
  387. uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
  388. writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
  389. ioc->ioc_regs.ioc_fail_sync);
  390. }
/*
 * Check whether all PCI fns that required failure sync have acked it.
 * Returns BFA_TRUE when synchronization is complete (or never started)
 * and this fn may proceed with reinitialization.
 */
static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
	uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	uint32_t tmp_ackd;

	/* no acks outstanding at all: nothing to wait for */
	if (sync_ackd == 0)
		return BFA_TRUE;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		/* everyone acked: clear the ackd half and mark both IOC
		 * fw states failed so reinit can begin */
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
		       ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return BFA_TRUE;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC need to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return BFA_FALSE;
}
/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 *
 * Fills in the hwif callbacks shared by CT and CT2; the per-ASIC
 * callbacks (pll/reg/port/isr) are set by the callers.
 */
static void
bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
{
	hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
	hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
	hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
	hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
	hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
	hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
	hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
	hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
	hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
}
/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 *
 * Populates the file-scope hwif_ct vtable with the CT-specific
 * callbacks and attaches it to the IOC.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);

	hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
	hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
	hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
	hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
	ioc->ioc_hwif = &hwif_ct;
}
/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 *
 * Populates the file-scope hwif_ct2 vtable with the CT2-specific
 * callbacks and attaches it to the IOC. CT2 has no per-fn interrupt
 * mode selection, hence ioc_isr_mode_set is NULL.
 */
void
bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
{
	bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);

	hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
	hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
	hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
	hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
	hwif_ct2.ioc_isr_mode_set = NULL;
	ioc->ioc_hwif = &hwif_ct2;
}
/*
 * Workaround for MSI-X resource allocation for catapult-2 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
void
bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		/* vector count already programmed: only (re)set the
		 * mailbox-error vector index */
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	/* program the default vector count and per-fn vector offset */
	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
	       HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
	       rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
	       rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
/*
 * PLL initialization for the CT (catapult) ASIC.
 *
 * Brings the slow (SCLK) and fast (LCLK) application PLLs out of reset,
 * masks/clears host interrupts left over from BIOS/EFI, releases LMEM
 * reset and runs the eDRAM BIST. The exact register write order and
 * delays are hardware-mandated; do not reorder.
 */
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 pll_sclk, pll_fclk, r32;
	bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	/* operating mode and Ethernet MAC SERDES setup differ by mode */
	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
		       __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
	}

	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));

	/* mask and clear any pending host interrupts on both fns */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

	/* assert logic soft reset, then enable the PLLs */
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
	       rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
	       rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
	       __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
	       __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	/* flush posted writes, then give the PLLs time to lock */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));

	/* release logic soft reset, PLLs stay enabled */
	writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/* kick off eDRAM BIST; result read appears informational only —
	 * NOTE(review): the value of MBIST_STAT_REG is not checked here */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
/*
 * Initialize the CT2 s_clk PLL. Leaves the PLL with logic soft reset
 * still asserted; bfa_ioc_ct2_clk_reset() releases it.
 */
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init dont clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value (0x1061731b is the hardware-specified divider
	 * setting; only the lock/refclk/div2 bits are preserved)
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}
/*
 * Initialize the CT2 l_clk PLL. Leaves the PLL with logic soft reset
 * still asserted; bfa_ioc_ct2_clk_reset() releases it.
 */
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 * NOTE(review): the value is written back unmodified here.
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 * NOTE(review): the value is written back unmodified here.
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16; 0x20c1731b is the
	 * hardware-specified divider setting)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}
/*
 * Release LMEM reset and run the CT2 eDRAM BIST.
 */
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}
  641. void
  642. bfa_ioc_ct2_mac_reset(void __iomem *rb)
  643. {
  644. /* put port0, port1 MAC & AHB in reset */
  645. writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
  646. rb + CT2_CSI_MAC_CONTROL_REG(0));
  647. writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
  648. rb + CT2_CSI_MAC_CONTROL_REG(1));
  649. }
/*
 * Enable flash access by driving GPIO bit 0 low and enabling its
 * output driver.
 */
static void
bfa_ioc_ct2_enable_flash(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_GPIO_OUT_REG));
	writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
	r32 = readl((rb + PSS_GPIO_OE_REG));
	writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
}
  659. #define CT2_NFC_MAX_DELAY 1000
  660. #define CT2_NFC_PAUSE_MAX_DELAY 4000
  661. #define CT2_NFC_VER_VALID 0x147
  662. #define CT2_NFC_STATE_RUNNING 0x20000001
  663. #define BFA_IOC_PLL_POLL 1000000
  664. static bfa_boolean_t
  665. bfa_ioc_ct2_nfc_halted(void __iomem *rb)
  666. {
  667. u32 r32;
  668. r32 = readl(rb + CT2_NFC_CSR_SET_REG);
  669. if (r32 & __NFC_CONTROLLER_HALTED)
  670. return BFA_TRUE;
  671. return BFA_FALSE;
  672. }
/*
 * Request an NFC controller halt and poll (up to CT2_NFC_MAX_DELAY ms)
 * until it takes effect; warn if it never does.
 */
static void
bfa_ioc_ct2_nfc_halt(void __iomem *rb)
{
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			break;
		udelay(1000);
	}
	WARN_ON(!bfa_ioc_ct2_nfc_halted(rb));
}
/*
 * Clear the NFC halt request and poll (up to CT2_NFC_MAX_DELAY ms)
 * until the halted bit drops; warn if it never does.
 */
static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	u32 r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	WARN_ON(1);
}
/*
 * Initialize both CT2 PLLs and then release their logic soft resets.
 */
static void
bfa_ioc_ct2_clk_reset(void __iomem *rb)
{
	u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
	       (rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
	       (rb + CT2_APP_PLL_LCLK_CTL_REG));
}
/*
 * Reset the clocks via the NFC firmware: hold the LPUs in reset, ask
 * the NFC to restart the SCLK/LCLK PLLs, and busy-poll the flash
 * status until the PLL reset cycle starts and then completes.
 */
static void
bfa_ioc_ct2_nfc_clk_reset(void __iomem *rb)
{
	u32 r32, i;

	r32 = readl((rb + PSS_CTL_REG));
	r32 |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
	writel(r32, (rb + PSS_CTL_REG));

	writel(__RESET_AND_START_SCLK_LCLK_PLLS, rb + CT2_CSI_FW_CTL_SET_REG);

	/* wait for the reset cycle to begin... */
	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if ((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON(!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	/* ...and then to finish */
	for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
		r32 = readl(rb + CT2_NFC_FLASH_STS_REG);

		if (!(r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS))
			break;
	}
	WARN_ON((r32 & __FLASH_PLL_INIT_AND_RESET_IN_PROGRESS));

	/* the NFC should have consumed the restart request by now */
	r32 = readl(rb + CT2_CSI_FW_CTL_REG);
	WARN_ON((r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
}
/*
 * Resume the NFC if halted and poll (up to CT2_NFC_PAUSE_MAX_DELAY ms)
 * until it reports the RUNNING state; warn on timeout.
 */
static void
bfa_ioc_ct2_wait_till_nfc_running(void __iomem *rb)
{
	u32 r32;
	int i;

	if (bfa_ioc_ct2_nfc_halted(rb))
		bfa_ioc_ct2_nfc_resume(rb);
	for (i = 0; i < CT2_NFC_PAUSE_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_STS_REG);
		if (r32 == CT2_NFC_STATE_RUNNING)
			return;
		udelay(1000);
	}

	r32 = readl(rb + CT2_NFC_STS_REG);
	WARN_ON(!(r32 == CT2_NFC_STATE_RUNNING));
}
/*
 * PLL initialization for the CT2 ASIC.
 *
 * Chooses a bring-up path from the WGN status and NFC firmware
 * version, then masks/acks stale mailbox interrupts, runs the memory
 * init and forces both IOC firmware states to UNINIT.
 */
bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
	u32 wgn, r32, nfc_ver;

	wgn = readl(rb + CT2_WGN_STATUS);
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		/*
		 * If flash is corrupted, enable flash explicitly
		 * (clk reset + flash enable done twice around a MAC reset)
		 */
		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);

		bfa_ioc_ct2_mac_reset(rb);

		bfa_ioc_ct2_clk_reset(rb);
		bfa_ioc_ct2_enable_flash(rb);

	} else {
		nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

		if ((nfc_ver >= CT2_NFC_VER_VALID) &&
		    (wgn == (__A2T_AHB_LOAD | __WGN_READY))) {
			/* valid NFC firmware: let it run the clock reset */
			bfa_ioc_ct2_wait_till_nfc_running(rb);

			bfa_ioc_ct2_nfc_clk_reset(rb);
		} else {
			/* no usable NFC: halt it and reset clocks directly */
			bfa_ioc_ct2_nfc_halt(rb);

			bfa_ioc_ct2_clk_reset(rb);
			bfa_ioc_ct2_mac_reset(rb);
			bfa_ioc_ct2_clk_reset(rb);

		}
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		/* ack any pending mailbox command status on both LPUs */
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));

	return BFA_STATUS_OK;
}