bfa_ioc_ct.c
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

#define bfa_ioc_ct_sync_pos(__ioc) \
        ((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH 16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
        (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
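
/*
 * Layout of the ioc_fail_sync register, as implied by the macros
 * above: the low 16 bits form a per-PCI-function "sync acked"
 * bitmask and the high 16 bits the matching "sync required"
 * bitmask. For example, PCI function 2 owns bit 0x00000004 in the
 * acked half and bit 0x00040000 in the required half.
 */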

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
                                enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
                                enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);

static const struct bfa_ioc_hwif nw_hwif_ct = {
        .ioc_pll_init = bfa_ioc_ct_pll_init,
        .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
        .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
        .ioc_reg_init = bfa_ioc_ct_reg_init,
        .ioc_map_port = bfa_ioc_ct_map_port,
        .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
        .ioc_notify_fail = bfa_ioc_ct_notify_fail,
        .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
        .ioc_sync_start = bfa_ioc_ct_sync_start,
        .ioc_sync_join = bfa_ioc_ct_sync_join,
        .ioc_sync_leave = bfa_ioc_ct_sync_leave,
        .ioc_sync_ack = bfa_ioc_ct_sync_ack,
        .ioc_sync_complete = bfa_ioc_ct_sync_complete,
};

static const struct bfa_ioc_hwif nw_hwif_ct2 = {
        .ioc_pll_init = bfa_ioc_ct2_pll_init,
        .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
        .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
        .ioc_reg_init = bfa_ioc_ct2_reg_init,
        .ioc_map_port = bfa_ioc_ct2_map_port,
        .ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat,
        .ioc_isr_mode_set = NULL,
        .ioc_notify_fail = bfa_ioc_ct_notify_fail,
        .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
        .ioc_sync_start = bfa_ioc_ct_sync_start,
        .ioc_sync_join = bfa_ioc_ct_sync_join,
        .ioc_sync_leave = bfa_ioc_ct_sync_leave,
        .ioc_sync_ack = bfa_ioc_ct_sync_ack,
        .ioc_sync_complete = bfa_ioc_ct_sync_complete,
};

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
        ioc->ioc_hwif = &nw_hwif_ct;
}

void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
        ioc->ioc_hwif = &nw_hwif_ct2;
}
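
/*
 * Illustrative only (not part of this driver file): a caller would
 * pick the hwif by ASIC generation before attaching the IOC, along
 * these hypothetical lines:
 *
 *	if (bfa_ioc_asic_gen(ioc) == BFI_ASIC_GEN_CT2)
 *		bfa_nw_ioc_set_ct2_hwif(ioc);
 *	else
 *		bfa_nw_ioc_set_ct_hwif(ioc);
 */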

/**
 * Return true if firmware of current driver matches the running firmware.
 */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
        enum bfi_ioc_state ioc_fwstate;
        u32 usecnt;
        struct bfi_ioc_image_hdr fwhdr;

        /*
         * If bios boot (flash based) -- do not increment usage count
         */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                        BFA_IOC_FWIMG_MINSZ)
                return true;

        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

        /*
         * If usage count is 0, always return TRUE.
         */
        if (usecnt == 0) {
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                return true;
        }

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        /*
         * Use count cannot be non-zero while the chip is in the
         * uninitialized state.
         */
        BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

        /*
         * Check if another driver with a different firmware is active
         */
        bfa_nw_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                return false;
        }

        /*
         * Same firmware version. Increment the reference count.
         */
        usecnt++;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        return true;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
        u32 usecnt;

        /*
         * If bios boot (flash based) -- do not decrement usage count
         */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                        BFA_IOC_FWIMG_MINSZ)
                return;

        /*
         * decrement usage count
         */
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
        BUG_ON(usecnt == 0);

        usecnt--;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/**
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
        writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
        writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
        /* Wait for halt to take effect */
        readl(ioc->ioc_regs.ll_halt);
        readl(ioc->ioc_regs.alt_ll_halt);
}

/**
 * Host to LPU mailbox message addresses
 */
static const struct {
        u32 hfn_mbox;
        u32 lpu_mbox;
        u32 hfn_pgn;
} ct_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static const struct {
        u32 hfn;
        u32 lpu;
} ct_p0reg[] = {
        { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static const struct {
        u32 hfn;
        u32 lpu;
} ct_p1reg[] = {
        { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static const struct {
        u32 hfn_mbox;
        u32 lpu_mbox;
        u32 hfn_pgn;
        u32 hfn;
        u32 lpu;
        u32 lpu_read;
} ct2_reg[] = {
        { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU0_READ_STAT },
        { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU1_READ_STAT },
};
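
/*
 * Note: the ct_* tables above are indexed by PCI function (0-3),
 * whereas ct2_reg has one entry per port; bfa_ioc_ct2_reg_init()
 * below indexes it with bfa_ioc_portid().
 */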

static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int pcifn = bfa_ioc_pcifn(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
        ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
        ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
        ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
        ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
        ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
        ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
        ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

        /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg : for notification of hb failure in fcmode
         */
        ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int port = bfa_ioc_portid(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
        ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
        ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
        ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

        if (port == 0) {
                ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
        ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
        ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
        ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
        ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
        ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
        ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
        ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

        /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg : for notification of hb failure in fcmode
         */
        ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

/**
 * Initialize IOC to port mapping.
 */
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
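
/*
 * Each PCI function owns an 8-bit field in FNC_PERS_REG (hence the
 * "* 8" shift above); the port-map bits are extracted from that
 * function's field, e.g. function 2's field starts at bit 16.
 */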
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        /*
         * For catapult, base port id on personality register and IOC type
         */
        r32 = readl(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
        ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}

/**
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32, mode;

        r32 = readl(rb + FNC_PERS_REG);
        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;

        /*
         * If already in desired mode, do not change anything
         */
        if ((!msix && mode) || (msix && !mode))
                return;

        if (msix)
                mode = __F0_INTX_STATUS_MSIX;
        else
                mode = __F0_INTX_STATUS_INTA;

        r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

        writel(r32, rb + FNC_PERS_REG);
}
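
/*
 * Report whether the LPU has posted a read status; a nonzero value
 * is acknowledged by writing 1 back to the same register.
 */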
static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
        u32 r32;

        r32 = readl(ioc->ioc_regs.lpu_read_stat);
        if (r32) {
                writel(1, ioc->ioc_regs.lpu_read_stat);
                return true;
        }

        return false;
}

/**
 * MSI-X resource allocation for 1860 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT             64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR   0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT       0x3013c
#define __MSIX_VT_NUMVT__MK             0x003ff800
#define __MSIX_VT_NUMVT__SH             11
#define __MSIX_VT_NUMVT_(_v)            ((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_                 0x000007ff
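
/*
 * HOSTFN_MSIX_VT_OFST_NUMVT packs two fields: bits [10:0] hold the
 * function's base vector offset and bits [21:11] the vector count,
 * programmed below as the 64-vector default minus one. Each PCI
 * function's offset is then 64 * its function number.
 */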
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        if (r32 & __MSIX_VT_NUMVT__MK) {
                writel(r32 & __MSIX_VT_OFST_,
                        rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
                return;
        }

        writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
                        HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
                        rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
                        rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}

/**
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        writel(0, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

        /*
         * Read the hw sem reg to make sure that it is locked
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        bfa_nw_ioc_hw_sem_release(ioc);
}

/**
 * Synchronized IOC failure processing routines
 */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

        /*
         * Driver load time. If the sync required bit for this PCI fn
         * is set, it is due to an unclean exit by the driver for this
         * PCI fn in the previous incarnation. Whoever comes here first
         * should clean it up, no matter which PCI fn.
         */
        if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        return bfa_ioc_ct_sync_complete(ioc);
}

/**
 * Synchronized IOC failure processing routines
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

        writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
                bfa_ioc_ct_sync_pos(ioc);

        writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

        writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}

static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
        u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
        u32 tmp_ackd;

        if (sync_ackd == 0)
                return true;

        /*
         * The check below is to see whether any other PCI fn
         * has reinitialized the ASIC (reset sync_ackd bits)
         * and failed again while this IOC was waiting for hw
         * semaphore (in bfa_iocpf_sm_semwait()).
         */
        tmp_ackd = sync_ackd;
        if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
                        !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
                sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

        if (sync_reqd == sync_ackd) {
                writel(bfa_ioc_ct_clear_sync_ackd(r32),
                                ioc->ioc_regs.ioc_fail_sync);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        /*
         * If another PCI fn reinitialized and failed again while
         * this IOC was waiting for hw sem, the sync_ackd bit for
         * this IOC needs to be set again to allow reinitialization.
         */
        if (tmp_ackd != sync_ackd)
                writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

        return false;
}
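
/*
 * PLL initialization for the original Catapult ASIC: select FC or
 * FCoE mode, mask and clear host interrupts, run both app PLLs
 * through a soft-reset-then-enable sequence, release LMEM reset,
 * and finish with the eDRAM memory BIST.
 */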
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
        u32 pll_sclk, pll_fclk, r32;
        bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

        pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
                __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
                __APP_PLL_SCLK_JITLMT0_1(3U) |
                __APP_PLL_SCLK_CNTLMT0_1(1U);
        pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
                __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
                __APP_PLL_LCLK_JITLMT0_1(3U) |
                __APP_PLL_LCLK_CNTLMT0_1(1U);

        if (fcmode) {
                writel(0, (rb + OP_MODE));
                writel(__APP_EMS_CMLCKSEL |
                                __APP_EMS_REFCKBUFEN2 |
                                __APP_EMS_CHANNEL_SEL,
                                (rb + ETH_MAC_SER_REG));
        } else {
                writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
                writel(__APP_EMS_REFCKBUFEN1,
                                (rb + ETH_MAC_SER_REG));
        }

        writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

        writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
                        rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
                        rb + APP_PLL_LCLK_CTL_REG);
        writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
                        __APP_PLL_SCLK_ENABLE,
                        rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
                        __APP_PLL_LCLK_ENABLE,
                        rb + APP_PLL_LCLK_CTL_REG);

        readl(rb + HOSTFN0_INT_MSK);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(pll_sclk | __APP_PLL_SCLK_ENABLE,
                        rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk | __APP_PLL_LCLK_ENABLE,
                        rb + APP_PLL_LCLK_CTL_REG);

        if (!fcmode) {
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
        }

        r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);

        if (!fcmode) {
                writel(0, (rb + PMM_1T_RESET_REG_P0));
                writel(0, (rb + PMM_1T_RESET_REG_P1));
        }

        writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
        udelay(1000);
        r32 = readl((rb + MBIST_STAT_REG));
        writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
}

static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put s_clk PLL and PLL FSM in reset
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
        r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * Ignore mode and program for the max clock (which is FC16)
         * Firmware/NFC will do the PLL init appropriately
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * while doing PLL init don't clock gate the ethernet subsystem
         */
        r32 = readl((rb + CT2_CHIP_MISC_PRG));
        writel((r32 | __ETH_CLK_ENABLE_PORT0),
                        (rb + CT2_CHIP_MISC_PRG));

        r32 = readl((rb + CT2_PCIE_MISC_REG));
        writel((r32 | __ETH_CLK_ENABLE_PORT1),
                        (rb + CT2_PCIE_MISC_REG));

        /*
         * set sclk value
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
                __APP_PLL_SCLK_CLK_DIV2);
        writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * poll for s_clk lock or delay 1ms
         */
        udelay(1000);

        /*
         * Don't do clock gating for the ethernet subsystem;
         * firmware/NFC will do this appropriately
         */
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put l_clk PLL and PLL FSM in reset
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
        r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET);
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * set LPU speed (set for FC16 which will work for other modes)
         */
        r32 = readl((rb + CT2_CHIP_MISC_PRG));
        writel(r32, (rb + CT2_CHIP_MISC_PRG));

        /*
         * set LPU half speed (set for FC16 which will work for other modes)
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * set lclk for mode (set for FC16)
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
        r32 |= 0x20c1731b;
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * poll for l_clk lock or delay 1ms
         */
        udelay(1000);
}
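
/*
 * Release LMEM out of reset, then start and stop the eDRAM
 * built-in self test (MBIST).
 */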
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
        u32 r32;

        r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);

        writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
        udelay(1000);
        writel(0, (rb + CT2_MBIST_CTL_REG));
}

static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
        volatile u32 r32;

        bfa_ioc_ct2_sclk_init(rb);
        bfa_ioc_ct2_lclk_init(rb);

        /*
         * release soft reset on s_clk
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
                        (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * release soft reset on l_clk
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
                        (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /* put port0, port1 MAC & AHB in reset */
        writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
                        (rb + CT2_CSI_MAC_CONTROL_REG(0)));
        writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
                        (rb + CT2_CSI_MAC_CONTROL_REG(1)));
}

#define CT2_NFC_MAX_DELAY       1000
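
/*
 * The NFC halt handshake below polls the CSR once per millisecond,
 * so CT2_NFC_MAX_DELAY bounds the wait at roughly one second.
 */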
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
        volatile u32 wgn, r32;
        int i;

        /*
         * Initialize PLL if not already done by NFC
         */
        wgn = readl(rb + CT2_WGN_STATUS);
        if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
                writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
                for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
                        if (r32 & __NFC_CONTROLLER_HALTED)
                                break;
                        udelay(1000);
                }
        }

        /*
         * Mask the interrupts and clear any
         * pending interrupts left by BIOS/EFI
         */
        writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
        writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

        r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
        if (r32 == 1) {
                writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
                readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
        }
        r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
        if (r32 == 1) {
                writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
                readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
        }

        bfa_ioc_ct2_mac_reset(rb);
        bfa_ioc_ct2_sclk_init(rb);
        bfa_ioc_ct2_lclk_init(rb);

        /*
         * release soft reset on s_clk
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
                        (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * release soft reset on l_clk
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
                        (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * Announce flash device presence, if flash was corrupted.
         */
        if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
                r32 = readl((rb + PSS_GPIO_OUT_REG));
                writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
                r32 = readl((rb + PSS_GPIO_OE_REG));
                writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
        }

        bfa_ioc_ct2_mem_init(rb);

        writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
        return BFA_STATUS_OK;
}