
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
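
/*
 * The ioc_fail_sync register is treated as two 16-bit bitmasks (this is
 * a reading of the accessor macros below, not documented elsewhere in
 * this file): the low half holds per-PCI-function "sync acked" bits and
 * the high half the matching "sync required" bits; each function owns
 * bit (1 << pcifn) in both halves.
 */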
#define bfa_ioc_ct_sync_pos(__ioc)	\
		((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)		(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val)	(__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)		(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc)	\
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);

static const struct bfa_ioc_hwif nw_hwif_ct = {
	.ioc_pll_init = bfa_ioc_ct_pll_init,
	.ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init = bfa_ioc_ct_reg_init,
	.ioc_map_port = bfa_ioc_ct_map_port,
	.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
	.ioc_notify_fail = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start = bfa_ioc_ct_sync_start,
	.ioc_sync_join = bfa_ioc_ct_sync_join,
	.ioc_sync_leave = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete = bfa_ioc_ct_sync_complete,
};

static const struct bfa_ioc_hwif nw_hwif_ct2 = {
	.ioc_pll_init = bfa_ioc_ct2_pll_init,
	.ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init = bfa_ioc_ct2_reg_init,
	.ioc_map_port = bfa_ioc_ct2_map_port,
	.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat,
	.ioc_isr_mode_set = NULL,
	.ioc_notify_fail = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start = bfa_ioc_ct_sync_start,
	.ioc_sync_join = bfa_ioc_ct_sync_join,
	.ioc_sync_leave = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete = bfa_ioc_ct_sync_complete,
};

/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct;
}

void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct2;
}

/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr fwhdr;

	/* If bios boot (flash based) -- do not increment usage count */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
			BFA_IOC_FWIMG_MINSZ)
		return true;

	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/* If usage count is 0, always return true. */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		return true;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/* Use count cannot be non-zero while the chip is uninitialized. */
	BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/* Check if another driver with a different firmware is active */
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		return false;
	}

	/* Same firmware version. Increment the reference count. */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	return true;
}
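
/* Drop one reference on the shared firmware usage count. */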
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
	u32 usecnt;

	/* If bios boot (flash based) -- do not decrement usage count */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
			BFA_IOC_FWIMG_MINSZ)
		return;

	/* decrement usage count */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	BUG_ON(usecnt == 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
	/* Wait for halt to take effect */
	readl(ioc->ioc_regs.ll_halt);
	readl(ioc->ioc_regs.alt_ll_halt);
}

/* Host to LPU mailbox message addresses */
static const struct {
	u32 hfn_mbox;
	u32 lpu_mbox;
	u32 hfn_pgn;
} ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
	u32 hfn;
	u32 lpu;
} ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
	u32 hfn;
	u32 lpu;
} ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static const struct {
	u32 hfn_mbox;
	u32 lpu_mbox;
	u32 hfn_pgn;
	u32 hfn;
	u32 lpu;
	u32 lpu_read;
} ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT },
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT },
};
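
/* Set up the per-function and per-port register addresses for CT ASICs. */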
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
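
/*
 * Set up the register addresses for CT2 ASICs; note the mailbox
 * registers here are indexed by port rather than PCI function.
 */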
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

/* Initialize IOC to port mapping. */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/*
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}

/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32, mode;

	r32 = readl(rb + FNC_PERS_REG);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/*
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

	writel(r32, rb + FNC_PERS_REG);
}
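
/* Read the LPU read-status; write 1 to acknowledge a pending event. */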
static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return true;
	}

	return false;
}

/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
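
/*
 * Power-on MSI-X setup: if the vector-count field of the NUMVT register
 * is already programmed, only the mailbox-error vector index is set;
 * otherwise claim HOSTFN_MSIX_DEFAULT vectors per PCI function starting
 * at pcifn * HOSTFN_MSIX_DEFAULT (an interpretation of the writes below).
 */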
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}

/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}

/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}

/* Join the sync group: set this function's sync_reqd bit. */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}
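
/* Leave the sync group: clear this function's sync_reqd and sync_ackd bits. */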
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
		bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}
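
/* Acknowledge a sync request by setting this function's sync_ackd bit. */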
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}
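
/* Return true once every function that required sync has acknowledged. */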
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	if (sync_ackd == 0)
		return true;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}
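
/*
 * CT PLL initialization: program the slow (SCLK) and fast (LCLK) PLLs
 * for the requested ASIC mode, mask and clear the host interrupts, then
 * release LMEM reset and run the eDRAM built-in self test (BIST).
 */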
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	u32 pll_sclk, pll_fclk, r32;
	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
			__APP_EMS_REFCKBUFEN2 |
			__APP_EMS_CHANNEL_SEL,
			(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
			(rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk |
		__APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
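
/*
 * Program the CT2 s_clk PLL. The logic soft reset asserted here is
 * released later by the caller (bfa_ioc_ct2_mac_reset() or
 * bfa_ioc_ct2_pll_init()).
 */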
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init don't clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel((r32 | __ETH_CLK_ENABLE_PORT0),
		(rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel((r32 | __ETH_CLK_ENABLE_PORT1),
		(rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * Don't do clock gating for ethernet subsystem, firmware/NFC will
	 * do this appropriately
	 */
}
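
/*
 * Program the CT2 l_clk PLL; as with s_clk, the logic soft reset is
 * released later by the caller.
 */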
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}
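
/* Release LMEM reset and run the eDRAM built-in self test. */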
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}
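
/*
 * Reinitialize both clock PLLs, release their logic soft resets, and
 * hold the port0/port1 MACs and their AHB interfaces in reset.
 */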
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	volatile u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
		(rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
		(rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		(rb + CT2_CSI_MAC_CONTROL_REG(0)));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
		(rb + CT2_CSI_MAC_CONTROL_REG(1)));
}

#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_VER_VALID	0x143
#define BFA_IOC_PLL_POLL	1000000
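
/* Check whether the NFC controller reports itself halted. */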
static bool
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	volatile u32 r32;

	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
	if (r32 & __NFC_CONTROLLER_HALTED)
		return true;

	return false;
}
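
/*
 * Clear the NFC halt and wait up to CT2_NFC_MAX_DELAY iterations (1ms
 * apart) for it to come out of halt; BUG() if it never does.
 */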
static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	volatile u32 r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	BUG_ON(1);
}
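
/*
 * CT2 PLL initialization takes one of two paths: if the NFC controller
 * is up with a valid version, ask it to reset and start the PLLs;
 * otherwise halt it and drive the MAC, clock and memory init from the
 * host. Either way, finish by masking the mailbox interrupts, clearing
 * any left over from BIOS/EFI, and marking both IOC state registers
 * uninitialized.
 */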
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	volatile u32 wgn, r32;
	u32 nfc_ver, i;

	wgn = readl(rb + CT2_WGN_STATUS);

	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

	if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
	    (nfc_ver >= CT2_NFC_VER_VALID)) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			bfa_ioc_ct2_nfc_resume(rb);
		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
			rb + CT2_CSI_FW_CTL_SET_REG);

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
				break;
		}
		BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
				break;
		}
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
		udelay(1000);

		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
	} else {
		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}

		bfa_ioc_ct2_mac_reset(rb);
		bfa_ioc_ct2_sclk_init(rb);
		bfa_ioc_ct2_lclk_init(rb);

		/* release soft reset on s_clk & l_clk */
		r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
			rb + CT2_APP_PLL_SCLK_CTL_REG);
		r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
			rb + CT2_APP_PLL_LCLK_CTL_REG);
	}

	/* Announce flash device presence, if flash was corrupted. */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		r32 = readl((rb + PSS_GPIO_OUT_REG));
		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
		r32 = readl((rb + PSS_GPIO_OE_REG));
		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}