bfa_core.c

/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_reg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_fcdiag,
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	&hal_mod_dconf,
	NULL
};

/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_fcdiag_intr,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};

/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};
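
/*
 * port module attach
 */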
static void
bfa_com_port_attach(struct bfa_s *bfa)
{
	struct bfa_port_s	*port = &bfa->modules.port;
	struct bfa_mem_dma_s	*port_dma = BFA_MEM_PORT_DMA(bfa);

	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
}

/*
 * ablk module attach
 */
static void
bfa_com_ablk_attach(struct bfa_s *bfa)
{
	struct bfa_ablk_s	*ablk = &bfa->modules.ablk;
	struct bfa_mem_dma_s	*ablk_dma = BFA_MEM_ABLK_DMA(bfa);

	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
}
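
/*
 * cee module attach
 */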
static void
bfa_com_cee_attach(struct bfa_s *bfa)
{
	struct bfa_cee_s	*cee = &bfa->modules.cee;
	struct bfa_mem_dma_s	*cee_dma = BFA_MEM_CEE_DMA(bfa);

	cee->trcmod = bfa->trcmod;
	bfa_cee_attach(cee, &bfa->ioc, bfa);
	bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
}
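
/*
 * sfp module attach
 */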
static void
bfa_com_sfp_attach(struct bfa_s *bfa)
{
	struct bfa_sfp_s	*sfp = BFA_SFP_MOD(bfa);
	struct bfa_mem_dma_s	*sfp_dma = BFA_MEM_SFP_DMA(bfa);

	bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
	bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
}
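
/*
 * flash module attach
 */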
static void
bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_flash_s	*flash = BFA_FLASH(bfa);
	struct bfa_mem_dma_s	*flash_dma = BFA_MEM_FLASH_DMA(bfa);

	bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_flash_memclaim(flash, flash_dma->kva_curp,
			   flash_dma->dma_curp, mincfg);
}
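
/*
 * diag module attach
 */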
static void
bfa_com_diag_attach(struct bfa_s *bfa)
{
	struct bfa_diag_s	*diag = BFA_DIAG_MOD(bfa);
	struct bfa_mem_dma_s	*diag_dma = BFA_MEM_DIAG_DMA(bfa);

	bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
	bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
}
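
/*
 * phy module attach
 */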
static void
bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
	struct bfa_phy_s	*phy = BFA_PHY(bfa);
	struct bfa_mem_dma_s	*phy_dma = BFA_MEM_PHY_DMA(bfa);

	bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
	bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};

#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256
#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16

/*
 * IOCFC state machine definitions/declarations
 */
bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, operational,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_write,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_failed,
		   struct bfa_iocfc_s, enum iocfc_event);

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_start_submod(struct bfa_s *bfa);
static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
static void bfa_iocfc_send_cfg(void *bfa_arg);
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);

static void
bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
{
}

static void
bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_INIT:
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modinit(iocfc->bfa);
}

static void
bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

static void
bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
{
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_START:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
		break;
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_fcport_init(iocfc->bfa);
	bfa_iocfc_start_submod(iocfc->bfa);
}

static void
bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modexit(iocfc->bfa);
}

static void
bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_isr_disable(iocfc->bfa);
		bfa_iocfc_disable_submod(iocfc->bfa);
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
			     bfa_iocfc_stop_cb, iocfc->bfa);
		break;
	case IOCFC_E_IOC_ENABLED:
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_CFG_DONE:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);

		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

static void
bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);

		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);

		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
		break;
	case IOCFC_E_IOC_ENABLED:
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_CFG_DONE:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
		     bfa_iocfc_disable_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
}

static void
bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;
	case IOCFC_E_IOC_FAILED:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

static void
bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_ioc_disable(&iocfc->bfa->ioc);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
			     bfa_iocfc_disable_cb, iocfc->bfa);
		break;
	case IOCFC_E_IOC_FAILED:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}

/*
 * BFA Interrupt handling functions
 */
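
/*
 * Resume waiters on a request queue: invoke each queued wait element's
 * qresume callback while the request queue still has room.
 */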
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}
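
/*
 * Process all pending messages on a response queue, acknowledge the
 * consumed entries and resume any reqq waiters. Returns BFA_TRUE if
 * at least one message was processed.
 */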
bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32	pi, ci;
	struct list_head *waitq;
	bfa_boolean_t ret;

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	ret = (ci != pi);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * acknowledge RME completions and update CI
	 */
	bfa_isr_rspq_ack(bfa, qid, ci);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);

	return ret;
}

static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	bfa_isr_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
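
/*
 * Single-vector MSIX handler: service RME and CPE queue interrupts,
 * then hand any remaining (error) bits to bfa_msix_lpu_err().
 */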
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32	intr, qintr;
	int	queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return;

	bfa_msix_lpu_err(bfa, intr);
}
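
/*
 * Legacy (INTx) interrupt handler. Returns BFA_TRUE if any queue or
 * error interrupt was claimed, BFA_FALSE otherwise.
 */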
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;
	bfa_boolean_t rspq_comp = BFA_FALSE;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * Unconditional RME completion queue interrupt
	 */
	if (bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			if (bfa_isr_rspq(bfa, queue))
				rspq_comp = BFA_TRUE;
	}

	if (!intr)
		return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	if (bfa->intr_enabled)
		bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
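
/*
 * Install the control MSIX vector and unmask the queue/error
 * interrupts that belong to this PCI function.
 */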
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);

	/*
	 * Set the flag indicating successful enabling of interrupts
	 */
	bfa->intr_enabled = BFA_TRUE;
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa->intr_enabled = BFA_FALSE;
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}

void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}

void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
			   (intr & __HFN_INT_LL_HALT) : 0;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * The ERR_PSS bit needs to be cleared as well: with
			 * shared interrupts, the driver's interrupt handler
			 * can still be called even though the source is
			 * already masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
			       bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);

	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if this is the driver init path
	 * (and not the IOC disable/enable path).
	 */
	if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
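
/*
 * Carve up the DMA and KVA memory allocated by the driver: IOC
 * attributes, request/response rings, shadow CI/PI, config info and
 * response pages, and the firmware trace buffer.
 */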
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8	*dm_kva = NULL;
	u64	dm_pa = 0;
	int	i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s	*ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s	*iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s	*reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			  bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
	bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
}

/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int	i;

	bfa->queue_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);

	bfa->iocfc.submod_enabled = BFA_TRUE;
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int	i;

	if (bfa->iocfc.submod_enabled == BFA_FALSE)
		return;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);

	bfa->iocfc.submod_enabled = BFA_FALSE;
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s	*bfa = bfa_arg;

	if (complete)
		bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s	*bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
}

static void
bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s	*bfad = bfa->bfad;

	if (compl)
		complete(&bfad->enable_comp);
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s	*bfa = bfa_arg;
	struct bfad_s	*bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/**
 * configure queue registers from firmware response
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int	i;
	struct bfa_iocfc_regs_s	*r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}
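
/*
 * Resize submodule resource pools to the counts granted by firmware.
 */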
static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s	*cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s	*fwcfg = &cfgrsp->fwcfg;

	fwcfg->num_cqs = fwcfg->num_cqs;
	fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);

	/*
	 * configure queue register offsets as learnt from firmware
	 */
	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

	/*
	 * Re-configure resources as learnt from Firmware
	 */
	bfa_iocfc_res_recfg(bfa, fwcfg);

	/*
	 * Install MSIX queue handlers
	 */
	bfa_msix_queue_install(bfa);

	if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
		bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
		bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
		bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
	}
}
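
/*
 * Reset producer/consumer indices of all request/response queues.
 */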
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int	q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/*
 * Process FAA pwwn msg from fw.
 */
static void
bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
{
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s	*cfgrsp = iocfc->cfgrsp;

	cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
	cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;

	bfa->ioc.attr->pwwn = msg->pwwn;
	bfa->ioc.attr->nwwn = msg->nwwn;
	bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
}

/* Fabric Assigned Address specific functions */

/*
 * Check whether IOC is ready before sending command down
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e	ioc_type = bfa_get_type(bfa);
	u32	card_type = bfa->ioc.attr->card_type;

	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
	      bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s	faa_attr_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		    BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));
	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
			  sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}

/*
 * FAA query response
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		    bfi_faa_query_rsp_t *rsp)
{
	void	*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s	*bfa = bfa_arg;

	if (status == BFA_STATUS_OK)
		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
	else
		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}

/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		  struct bfa_s *bfa)
{
	int q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32	dm_len = 0;

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
			  BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s),
				      BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				  per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				  per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC */
	bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
}

/*
 * Attach the IOCFC module: register IOC callbacks, initialize chip
 * specific handlers and claim the memory carved out for IOCFC.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);

	bfa->iocfc.cb_reqd = BFA_FALSE;
	bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa->iocfc.submod_enabled = BFA_FALSE;

	bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
}

/*
 * Start IOCFC initialization by kicking its state machine.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
}

void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s			*bfa = bfaarg;
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u	*msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_ADDR_MSG:
		bfa_iocfc_process_faa_addr(bfa,
					   (struct bfi_faa_addr_msg_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);
	}
}
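
/*
 * Return the current interrupt attributes and IOCFC configuration;
 * driver-programmed interrupt values take precedence over firmware
 * defaults.
 */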
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}
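
/*
 * Update interrupt coalescing attributes and, if the IOC is
 * operational, push them to firmware with a set-interrupt request.
 */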
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
	return BFA_STATUS_OK;
}
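
/*
 * Program the SCSI sense-buffer base address for an IO segment.
 */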
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}

/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.cb_reqd = BFA_TRUE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");

	bfa->queue_process = BFA_FALSE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) &&
		bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
}

/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}
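/*
 * Sketch: consuming the boot target WWNs retrieved above. The bound of
 * 8 is hypothetical -- the caller must size the array to cover both
 * cfgrsp->bootwwns.wwn[] and cfgrsp->pbc_cfg.blun[]; the example_ name
 * is illustrative only.
 */
static void
example_log_boot_wwns(struct bfa_s *bfa)
{
	wwn_t wwns[8];	/* hypothetical upper bound */
	u8 nwwns = 0;
	int i;

	bfa_iocfc_get_bootwwns(bfa, &nwwns, wwns);
	for (i = 0; i < nwwns && i < 8; i++)
		pr_info("boot target %d pwwn 0x%016llx\n", i,
			(unsigned long long)wwns[i]);
}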
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}

/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required by the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to the
 * pre-defined values within the BFA library.
 *
 * @param[in] cfg -	pointer to struct bfa_iocfc_cfg_s. Driver layer
 *			should indicate its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using the bfa_cfg_get_default() API.
 *
 *			If the cap's boundary check fails, the library will
 *			use the default bfa_cap_t values (and log a warning
 *			msg).
 *
 * @param[out] meminfo - pointer to struct bfa_meminfo_s. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to the bfa_attach() call.
 *
 * @param[in] bfa -	pointer to the bfa structure, used while fetching the
 *			dma, kva memory information of the bfa sub-modules.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		    struct bfa_s *bfa)
{
	int i;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	/* dma info setup */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
	bfa_mem_dma_setup(meminfo, flash_dma,
			  bfa_flash_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
	bfa_mem_dma_setup(meminfo, phy_dma,
			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
}
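/*
 * A minimal driver-side sketch of the contract documented above: query
 * the layout with bfa_cfg_get_meminfo(), then back every element on the
 * DMA and KVA queues with real memory before calling bfa_attach(). The
 * example_ name and the mem_len element field are assumptions; error
 * unwinding (freeing what was already allocated) is omitted for brevity.
 */
static int
example_alloc_bfa_mem(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		      struct bfa_meminfo_s *meminfo, struct pci_dev *pdev)
{
	struct bfa_mem_dma_s *dma_elem;
	struct bfa_mem_kva_s *kva_elem;
	struct list_head *qe;
	dma_addr_t pa;

	bfa_cfg_get_meminfo(cfg, meminfo, bfa);

	/* back each DMA element with coherent memory */
	list_for_each(qe, &meminfo->dma_info.qe) {
		dma_elem = (struct bfa_mem_dma_s *) qe;
		dma_elem->kva = dma_alloc_coherent(&pdev->dev,
				dma_elem->mem_len, &pa, GFP_KERNEL);
		if (!dma_elem->kva)
			return -ENOMEM;
		dma_elem->dma = pa;
	}

	/* back each KVA element with plain kernel virtual memory */
	list_for_each(qe, &meminfo->kva_info.qe) {
		kva_elem = (struct bfa_mem_kva_s *) qe;
		kva_elem->kva = vzalloc(kva_elem->mem_len);
		if (!kva_elem->kva)
			return -ENOMEM;
	}

	return 0;
}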
/*
 * Use this function to attach the driver instance with the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in the bfa_init() call).
 *
 * This call will fail if the cap is out of range compared to the
 * pre-defined values within the BFA library.
 *
 * @param[out] bfa	Pointer to struct bfa_s.
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 * @param[in] cfg	Pointer to struct bfa_iocfc_cfg_s. Should be the same
 *			structure that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo	Pointer to struct bfa_meminfo_s. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Initialize memory pointers for iterative allocation */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
	bfa_com_sfp_attach(bfa);
	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_diag_attach(bfa);
	bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
}
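/*
 * Hypothetical bring-up sketch tying bfa_cfg_get_default(),
 * example_alloc_bfa_mem() (sketch above) and bfa_attach() together.
 * HW initialization itself is started later via bfa_init(), as noted
 * in the comment above bfa_attach().
 */
static int
example_bfa_bringup(struct bfa_s *bfa, void *bfad,
		    struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo,
		    struct bfa_pcidev_s *pcidev, struct pci_dev *pdev)
{
	int rc;

	bfa_cfg_get_default(cfg);	/* or bfa_cfg_get_min() */

	rc = example_alloc_bfa_mem(bfa, cfg, meminfo, pdev);
	if (rc)
		return rc;

	bfa_attach(bfa, bfad, cfg, meminfo, pcidev);
	return 0;
}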
/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to struct bfa_s.
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);
	bfa_ioc_detach(&bfa->ioc);
}

/*
 * Detach all pending completions from the BFA completion queue onto the
 * caller-supplied list; typically called with the driver lock held.
 */
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

/*
 * Invoke the dequeued completion callbacks with a "complete" status;
 * typically called after bfa_comp_deq(), outside the driver lock.
 */
void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;
	bfa_cb_cbfn_status_t cbfn;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		if (hcb_qe->pre_rmv) {
			/* qe is invalid after return, dequeue before cbfn() */
			list_del(qe);
			cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
			cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
		} else
			hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

/*
 * Flush queued completions with a "cancelled" status, e.g. on teardown.
 */
void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		WARN_ON(hcb_qe->pre_rmv);
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}
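/*
 * Sketch of the deferred-completion drain pattern the three helpers
 * above support: detach the queue under the driver lock, then run the
 * callbacks outside it. The lock parameter and example_ name are
 * hypothetical -- use whatever lock serializes access to bfa->comp_q
 * in your driver.
 */
static void
example_drain_completions(struct bfa_s *bfa, spinlock_t *hal_lock)
{
	struct list_head doneq;
	unsigned long flags;

	spin_lock_irqsave(hal_lock, flags);
	bfa_comp_deq(bfa, &doneq);	/* splice pending completions */
	spin_unlock_irqrestore(hal_lock, flags);

	bfa_comp_process(bfa, &doneq);	/* cbfn(cbarg, BFA_TRUE) */
}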
/*
 * Return the list of PCI vendor/device ids supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = ARRAY_SIZE(__pciids);
	*pciids = __pciids;
}
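/*
 * Sketch: consuming the exported ID table, e.g. to check whether a
 * probed PCI function is supported. The vendor_id/device_id field names
 * of struct bfa_pciid_s are assumed here, not taken from this file.
 */
static bfa_boolean_t
example_pciid_supported(u16 vendor, u16 device)
{
	struct bfa_pciid_s *ids;
	int nids, i;

	bfa_get_pciids(&ids, &nids);
	for (i = 0; i < nids; i++)
		if (ids[i].vendor_id == vendor &&
		    ids[i].device_id == device)
			return BFA_TRUE;

	return BFA_FALSE;
}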
/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to struct bfa_iocfc_cfg_s
 *
 * @return
 * void
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}

/*
 * Like bfa_cfg_get_default(), but trimmed to the minimum resource
 * footprint (e.g. for low-memory environments); sets drvcfg.min_cfg.
 */
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;
	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}
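/*
 * Sketch of the intended pattern from the comment above
 * bfa_cfg_get_default(): fetch the compiled-in defaults, then overwrite
 * only what the user configured. The parameters and example_ name are
 * hypothetical stand-ins for driver module parameters.
 */
static void
example_build_cfg(struct bfa_iocfc_cfg_s *cfg, u16 user_num_ioim_reqs,
		  bfa_boolean_t user_ioc_recover)
{
	bfa_cfg_get_default(cfg);

	if (user_num_ioim_reqs)
		cfg->fwcfg.num_ioim_reqs = user_num_ioim_reqs;
	cfg->drvcfg.ioc_recover = user_ioc_recover;
}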