/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_reg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_fcdiag,
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	&hal_mod_dconf,
	NULL
};

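/*
 * Illustrative note (not part of the original code): each entry provides the
 * bfa_module_s hooks, and the table is walked until the NULL sentinel, e.g.
 *
 *	for (i = 0; hal_mods[i]; i++)
 *		hal_mods[i]->meminfo(cfg, meminfo, bfa);
 *
 * The same pattern is used below in bfa_iocfc_start_submod(),
 * bfa_iocfc_disable_submod() and bfa_cfg_get_meminfo().
 */
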
/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_fcdiag_intr,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};

/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};

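/*
 * Dispatch sketch (for illustration only): response-queue messages are routed
 * through bfa_isrs[] by message class in bfa_isr_rspq() below, roughly
 *
 *	bfa_isrs[m->mhdr.msg_class](bfa, m);
 *
 * while mailbox messages use bfa_mbox_isrs[], registered with the IOC via
 * bfa_ioc_mbox_register() in bfa_iocfc_attach(). Classes without a handler
 * fall through to bfa_isr_unhandled() or a NULL mailbox entry.
 */
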
static void
bfa_com_port_attach(struct bfa_s *bfa)
{
	struct bfa_port_s	*port = &bfa->modules.port;
	struct bfa_mem_dma_s	*port_dma = BFA_MEM_PORT_DMA(bfa);

	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
}

/*
 * ablk module attach
 */
static void
bfa_com_ablk_attach(struct bfa_s *bfa)
{
	struct bfa_ablk_s	*ablk = &bfa->modules.ablk;
	struct bfa_mem_dma_s	*ablk_dma = BFA_MEM_ABLK_DMA(bfa);

	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
}

static void
bfa_com_cee_attach(struct bfa_s *bfa)
{
	struct bfa_cee_s	*cee = &bfa->modules.cee;
	struct bfa_mem_dma_s	*cee_dma = BFA_MEM_CEE_DMA(bfa);

	cee->trcmod = bfa->trcmod;
	bfa_cee_attach(cee, &bfa->ioc, bfa);
	bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
}

static void
bfa_com_sfp_attach(struct bfa_s *bfa)
{
	struct bfa_sfp_s	*sfp = BFA_SFP_MOD(bfa);
	struct bfa_mem_dma_s	*sfp_dma = BFA_MEM_SFP_DMA(bfa);

	bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
	bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
}

static void
bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_flash_s	*flash = BFA_FLASH(bfa);
	struct bfa_mem_dma_s	*flash_dma = BFA_MEM_FLASH_DMA(bfa);

	bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_flash_memclaim(flash, flash_dma->kva_curp,
			   flash_dma->dma_curp, mincfg);
}

static void
bfa_com_diag_attach(struct bfa_s *bfa)
{
	struct bfa_diag_s	*diag = BFA_DIAG_MOD(bfa);
	struct bfa_mem_dma_s	*diag_dma = BFA_MEM_DIAG_DMA(bfa);

	bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
	bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
}

static void
bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_phy_s	*phy = BFA_PHY(bfa);
	struct bfa_mem_dma_s	*phy_dma = BFA_MEM_PHY_DMA(bfa);

	bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};

#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256
#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16

/*
 * IOCFC state machine definitions/declarations
 */
bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, operational,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_write,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_failed,
		   struct bfa_iocfc_s, enum iocfc_event);

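/*
 * For reference, a hedged sketch of what each declaration above is expected
 * to expand to, based on the bfa_fsm_state_decl() helper in bfa_cs.h:
 *
 *	static void bfa_iocfc_sm_stopped(struct bfa_iocfc_s *fsm,
 *					 enum iocfc_event event);
 *	static void bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *fsm);
 *
 * The definitions below provide these handlers; bfa_fsm_set_state() switches
 * state and runs the new state's _entry handler, and bfa_fsm_send_event()
 * invokes the current state handler with the given event.
 */
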
/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_start_submod(struct bfa_s *bfa);
static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
static void bfa_iocfc_send_cfg(void *bfa_arg);
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);

static void
bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
{
}

static void
bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_INIT:
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modinit(iocfc->bfa);
}

static void
bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

static void
bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
{
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_START:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
		break;
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_fcport_init(iocfc->bfa);
	bfa_iocfc_start_submod(iocfc->bfa);
}

static void
bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modexit(iocfc->bfa);
}

static void
bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_isr_disable(iocfc->bfa);
		bfa_iocfc_disable_submod(iocfc->bfa);
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
			     bfa_iocfc_stop_cb, iocfc->bfa);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);

		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

static void
bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);

		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);

		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
		     bfa_iocfc_disable_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
}

static void
bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;
	case IOCFC_E_IOC_FAILED:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_ioc_disable(&iocfc->bfa->ioc);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
			     bfa_iocfc_disable_cb, iocfc->bfa);
		break;
	case IOCFC_E_IOC_FAILED:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

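/*
 * Summary of the normal IOCFC state flow implemented above (derived from the
 * handlers, added here for readability):
 *
 *   init:    stopped -> initing -> dconf_read -> init_cfg_wait ->
 *            init_cfg_done -> operational
 *   stop:    operational -> dconf_write -> stopping -> stopped
 *   disable: operational -> disabling -> disabled
 *   enable:  disabled -> enabling -> cfg_wait -> operational
 *
 * IOC failures route to the failed/init_failed states, which turn off
 * interrupts and sub-modules; the outcome is reported to the driver through
 * the queued init/enable/disable callbacks.
 */
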
/*
 * BFA Interrupt handling functions
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32	pi, ci;
	struct list_head *waitq;
	bfa_boolean_t ret;

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	ret = (ci != pi);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * acknowledge RME completions and update CI
	 */
	bfa_isr_rspq_ack(bfa, qid, ci);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);

	return ret;
}

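/*
 * Note on the consumer-index walk above: the response queue is circular, and
 * CQ_INCR() (from the BFA headers) is assumed to advance and wrap the index
 * against the queue depth, roughly
 *
 *	ci = (ci + 1) & (num_rspq_elems - 1);
 *
 * so the loop drains every message between the driver's CI and the firmware's
 * PI before acknowledging the queue with bfa_isr_rspq_ack().
 */
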
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	bfa_isr_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32	intr, qintr;
	int	queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return;

	bfa_msix_lpu_err(bfa, intr);
}

bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;
	bfa_boolean_t rspq_comp = BFA_FALSE;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * Unconditional RME completion queue interrupt
	 */
	if (bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			if (bfa_isr_rspq(bfa, queue))
				rspq_comp = BFA_TRUE;
	}

	if (!intr)
		return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}

void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}

void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}

void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr  = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				   __HFN_INT_MBOX_LPU1_CT2);
		intr    &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
					  (intr & __HFN_INT_LL_HALT) : 0;
		pss_isr  = intr & __HFN_INT_ERR_PSS;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr    &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so driver's interrupt handler
			 * is still called even though it is already masked
			 * out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
			       bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}

/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8	*dm_kva = NULL;
	u64	dm_pa = 0;
	int	i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			  bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa  = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
	bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
}

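/*
 * Layout of the IOCFC DMA segment carved out above in bfa_iocfc_mem_claim()
 * (sizes rounded up to BFA_CACHELINE_SZ):
 *
 *	[ req shadow CI | rsp shadow PI ]  x num_cqs
 *	[ struct bfi_iocfc_cfg_s        ]  -> iocfc->cfginfo
 *	[ struct bfi_iocfc_cfgrsp_s     ]  -> iocfc->cfgrsp
 *
 * bfa_iocfc_meminfo() below reserves exactly this amount when the driver
 * queries the memory requirements.
 */
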
/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int		i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);

	bfa->iocfc.submod_enabled = BFA_TRUE;
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int		i;

	if (bfa->iocfc.submod_enabled == BFA_FALSE)
		return;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);

	bfa->iocfc.submod_enabled = BFA_FALSE;
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s	*bfa = bfa_arg;

	if (complete)
		bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s	*bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
}

static void
bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s	*bfad = bfa->bfad;

	if (compl)
		complete(&bfad->enable_comp);
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s	*bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/*
 * Configure queue registers from firmware response.
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int	i;
	struct bfa_iocfc_regs_s	*r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}

static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s		*iocfc	= &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s	*cfgrsp	= iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s	*fwcfg	= &cfgrsp->fwcfg;

	fwcfg->num_cqs	      = fwcfg->num_cqs;
	fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);

	/*
	 * configure queue register offsets as learnt from firmware
	 */
	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

	/*
	 * Re-configure resources as learnt from Firmware
	 */
	bfa_iocfc_res_recfg(bfa, fwcfg);

	/*
	 * Install MSIX queue handlers
	 */
	bfa_msix_queue_install(bfa);

	if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
		bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
		bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
		bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
	}
}

void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int		q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/*
 * Process FAA pwwn msg from fw.
 */
static void
bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
{
	struct bfa_iocfc_s		*iocfc	= &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s	*cfgrsp	= iocfc->cfgrsp;

	cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
	cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;

	bfa->ioc.attr->pwwn = msg->pwwn;
	bfa->ioc.attr->nwwn = msg->nwwn;
	bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
}

/* Fabric Assigned Address specific functions */

/*
 * Check whether IOC is ready before sending command down
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e	ioc_type = bfa_get_type(bfa);
	u32	card_type = bfa->ioc.attr->card_type;

	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
	      bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s	faa_attr_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
			  sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}

/*
 * FAA query response
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		    bfi_faa_query_rsp_t *rsp)
{
	void	*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s	*bfa = bfa_arg;

	if (status == BFA_STATUS_OK)
		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
	else
		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}

/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	int q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32	dm_len = 0;

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				  per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				  per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC */
	bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
}

/*
 * Attach the IOCFC module: register IOC callbacks, claim memory and put the
 * state machine into its initial (stopped) state.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);

	bfa->iocfc.cb_reqd = BFA_FALSE;
	bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa->iocfc.submod_enabled = BFA_FALSE;

	bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
}

/*
 * Kick off IOC and IOCFC initialization (driver init path).
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
}

void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s			*bfa = bfaarg;
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u	*msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_ADDR_MSG:
		bfa_iocfc_process_faa_addr(bfa,
				(struct bfi_faa_addr_msg_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);
	}
}

void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
			be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
			be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config	= iocfc->cfg;
}

bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s	*m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay    = iocfc->cfginfo->intr_attr.delay;
	m->latency  = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
	return BFA_STATUS_OK;
}

void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}

/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.cb_reqd = BFA_TRUE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");

	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) &&
		bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
}

/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}

/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required by the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to the
 * pre-defined values within the BFA library.
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using the bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to the bfa_attach() call.
 *
 * @param[in] bfa -	pointer to the bfa structure, used while fetching the
 *			dma, kva memory information of the bfa sub-modules.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		    struct bfa_s *bfa)
{
	int i;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	/* dma info setup */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
	bfa_mem_dma_setup(meminfo, flash_dma,
			  bfa_flash_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
	bfa_mem_dma_setup(meminfo, phy_dma,
			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
}
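
/*
 * Illustrative sketch (not part of this file): after bfa_cfg_get_meminfo()
 * returns, the driver is expected to walk the DMA and KVA element lists and
 * back each element with memory before calling bfa_attach().  The mem_len
 * field, the dev pointer and the dma_alloc_coherent()/vzalloc() choices
 * below are assumptions about how a typical Linux driver would satisfy the
 * request; they are not defined in this file.
 *
 *	struct bfa_mem_dma_s *dma_elem;
 *	struct bfa_mem_kva_s *kva_elem;
 *	struct list_head *qe;
 *
 *	list_for_each(qe, &meminfo->dma_info.qe) {
 *		dma_elem = (struct bfa_mem_dma_s *) qe;
 *		dma_elem->kva = dma_alloc_coherent(dev, dma_elem->mem_len,
 *						   &dma_elem->dma, GFP_KERNEL);
 *	}
 *
 *	list_for_each(qe, &meminfo->kva_info.qe) {
 *		kva_elem = (struct bfa_mem_kva_s *) qe;
 *		kva_elem->kva = vzalloc(kva_elem->mem_len);
 *	}
 */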
/*
 * Use this function to attach the driver instance to the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in the bfa_init() call).
 *
 * This call will fail if the cap is out of range compared to the
 * pre-defined values within the BFA library.
 *
 * @param[out] bfa	Pointer to bfa_t.
 *
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 *
 * @param[in] cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 *
 * @param[in] meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 *
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Initialize memory pointers for iterative allocation */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
	bfa_com_sfp_attach(bfa);
	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_diag_attach(bfa);
	bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
}
/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);
	bfa_ioc_detach(&bfa->ioc);
}
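
/*
 * Illustrative sketch (not part of this file): the bring-up and tear-down
 * order implied by the comments above, as seen from the driver side.  The
 * bfad and pcidev names are placeholders for driver-side objects; error
 * handling and the per-element memory allocation are omitted.
 *
 *	struct bfa_iocfc_cfg_s cfg;
 *	struct bfa_meminfo_s meminfo;
 *
 *	bfa_cfg_get_default(&cfg);
 *	bfa_cfg_get_meminfo(&cfg, &meminfo, bfa);
 *	... allocate the DMA/KVA blocks listed in meminfo ...
 *	bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
 *	... bfa_init(), normal operation, then bfa_stop() ...
 *	bfa_detach(bfa);
 */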
/*
 * Splice all pending completions off bfa->comp_q onto the caller's list.
 */
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

/*
 * Run the completion callbacks queued on comp_q.
 */
void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;
	bfa_cb_cbfn_status_t cbfn;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		if (hcb_qe->pre_rmv) {
			/* qe is invalid after return, dequeue before cbfn() */
			list_del(qe);
			cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
			cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
		} else
			hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

/*
 * Discard queued completions, invoking each callback with BFA_FALSE.
 */
void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		WARN_ON(hcb_qe->pre_rmv);
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}
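
/*
 * Illustrative sketch (not part of this file): a typical consumer detaches
 * pending completions under whatever lock serializes access to bfa->comp_q
 * and then runs the callbacks with the lock dropped.  The bfad->bfad_lock
 * spinlock below is an assumption about the caller, not something defined
 * here.
 *
 *	struct list_head doneq;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&bfad->bfad_lock, flags);
 *	bfa_comp_deq(bfa, &doneq);
 *	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 *
 *	bfa_comp_process(bfa, &doneq);
 */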
/*
 * Return the list of PCI vendor/device id pairs supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}
/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 * void
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}
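
/*
 * Illustrative sketch (not part of this file): a driver would normally start
 * from the compiled-in defaults and override only the fields it cares about
 * before passing the structure to bfa_cfg_get_meminfo()/bfa_attach().  The
 * num_ios module parameter below is hypothetical.
 *
 *	struct bfa_iocfc_cfg_s cfg;
 *
 *	bfa_cfg_get_default(&cfg);
 *	cfg.fwcfg.num_ioim_reqs = num_ios;
 *	cfg.drvcfg.ioc_recover = BFA_TRUE;
 */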
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;
	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}