bfa_core.c

/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfa_modules.h"
#include "bfi_ctreg.h"
#include "bfad_drv.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
};

#define DEF_CFG_NUM_FABRICS	1
#define DEF_CFG_NUM_LPORTS	256
#define DEF_CFG_NUM_CQS		4
#define DEF_CFG_NUM_IOIM_REQS	(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS	128
#define DEF_CFG_NUM_FCXP_REQS	64
#define DEF_CFG_NUM_UF_BUFS	64
#define DEF_CFG_NUM_RPORTS	1024
#define DEF_CFG_NUM_ITNIMS	(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS	256

#define DEF_CFG_NUM_SGPGS	2048
#define DEF_CFG_NUM_REQQ_ELEMS	256
#define DEF_CFG_NUM_RSPQ_ELEMS	64
#define DEF_CFG_NUM_SBOOT_TGTS	16
#define DEF_CFG_NUM_SBOOT_LUNS	16

/*
 * forward declarations for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;

/*
 * BFA Interrupt handling functions
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}

bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_RME_Q0 << queue))
			bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_CPE_Q0 << queue))
			bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
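
/*
 * Illustrative sketch (not part of the driver): how an OS driver's
 * legacy-INTx handler might wrap bfa_intx().  bfa_intx() returns
 * BFA_FALSE when the interrupt is not ours, which maps naturally to
 * IRQ_NONE on shared interrupt lines.  The bfad_s layout and the
 * bfad_lock spinlock shown here are assumptions for illustration.
 */
#if 0
static irqreturn_t
my_bfad_intx(int irq, void *dev_id)
{
	struct bfad_s	*bfad = dev_id;
	unsigned long	flags;
	bfa_boolean_t	ours;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	ours = bfa_intx(&bfad->bfa);	/* processes RME/CPE queues */
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return ours ? IRQ_HANDLED : IRQ_NONE;
}
#endif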
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 intr_unmask;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_install(bfa);
	intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
		       __HFN_INT_LL_HALT);

	if (pci_func == 0)
		intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				__HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				__HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				__HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				__HFN_INT_MBOX_LPU0);
	else
		intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				__HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				__HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				__HFN_INT_MBOX_LPU1);

	writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
	writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~intr_unmask;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}

void
bfa_msix_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	bfa_assert(0);
	bfa_trc_stop(bfa->trcmod);
}

void
bfa_msix_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	bfa_trc_fp(bfa, qid);

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	bfa_trc_fp(bfa, ci);
	bfa_trc_fp(bfa, pi);

	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, qid, ci);
			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);

			bfa_isrs[m->mhdr.msg_class](bfa, m);

			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}

void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
		bfa_ioc_mbox_isr(&bfa->ioc);

	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);

	if (intr) {
		if (intr & __HFN_INT_LL_HALT) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (intr & __HFN_INT_ERR_PSS) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared, so the driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			curr_value &= __PSS_ERR_STATUS_SET;
			writel(curr_value,
			       bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}

/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	int i, per_reqq_sz, per_rspq_sz;

	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	/*
	 * Calculate CQ size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		*dm_len = *dm_len + per_reqq_sz;
		*dm_len = *dm_len + per_rspq_sz;
	}

	/*
	 * Calculate Shadow CI/PI size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++)
		*dm_len += (2 * BFA_CACHELINE_SZ);
}

static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s),
			       BFA_CACHELINE_SZ);
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			       BFA_CACHELINE_SZ);
}

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);

	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}

static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}

static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;
	int i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	int dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	dm_pa  += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}

/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int i;

	bfa->rme_process = BFA_TRUE;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

	fwcfg->num_cqs	      = fwcfg->num_cqs;
	fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
	fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/*
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
	else
		bfa_iocfc_start_submod(bfa);
}

void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}

/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->rme_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}

/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		  u32 *dm_len)
{
	/* dma memory for IOC */
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
	bfa_iocfc_cqs_sz(cfg, dm_len);
	*km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/*
 * Attach the IOC FC sub-module: set up IOC callbacks, claim memory and
 * initialize the queue wait lists.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	/*
	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
	 */
	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
		bfa_ioc_set_fcmode(&bfa->ioc);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}

/*
 * Start IOC FC initialization by enabling the IOC.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}

void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		iocfc->cfg_reply = &msg->cfg_reply;
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	default:
		bfa_assert(0);
	}
}

void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
			be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
			be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}

bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay    = iocfc->cfginfo->intr_attr.delay;
	m->latency  = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}
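
/*
 * Illustrative sketch (not part of the driver): programming interrupt
 * moderation through bfa_iocfc_israttr_set().  The delay/latency values
 * are arbitrary example numbers, not recommended settings; their units
 * are whatever the firmware defines for these fields.
 */
#if 0
static bfa_status_t
my_set_intr_moderation(struct bfa_s *bfa)
{
	struct bfa_iocfc_intr_attr_s attr;

	attr.coalesce = BFA_TRUE;	/* enable coalescing */
	attr.delay = 1125;		/* example delay value */
	attr.latency = 225;		/* example latency value */

	/* Returns BFA_STATUS_DEVBUSY if no request queue element is free. */
	return bfa_iocfc_israttr_set(bfa, &attr);
}
#endif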
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}

/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa_ioc_enable(&bfa->ioc);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}

/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

void
bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
}

int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}

/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to pre-defined
 * values within the BFA library.
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			 indicates the memory type (see bfa_mem_type_t) and
 *			 amount of memory required.
 *
 *			 Driver should allocate the memory, populate the
 *			 starting address for each block and provide the same
 *			 structure as input parameter to bfa_attach() call.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
{
	int i;
	u32 km_len = 0, dm_len = 0;

	bfa_assert((cfg != NULL) && (meminfo != NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
		BFA_MEM_TYPE_KVA;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
		BFA_MEM_TYPE_DMA;

	bfa_iocfc_meminfo(cfg, &km_len, &dm_len);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, &km_len, &dm_len);

	dm_len += bfa_port_meminfo();

	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
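
/*
 * Illustrative sketch (not part of the driver): how an OS driver might
 * query and satisfy the BFA memory requirements before bfa_attach(), as
 * described in the comment block above.  KVA blocks come from the kernel
 * heap (vmalloc); DMA blocks must be coherent (dma_alloc_coherent, which
 * needs <linux/dma-mapping.h>).  The helper name and the pci_dev plumbing
 * are assumptions for illustration.
 */
#if 0
static int
my_alloc_bfa_mem(struct pci_dev *pdev, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo)
{
	struct bfa_mem_elem_s *melem;
	dma_addr_t pa;
	int i;

	bfa_cfg_get_default(cfg);		/* start from defaults */
	bfa_cfg_get_meminfo(cfg, meminfo);	/* compute per-type sizes */

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		if (!melem->mem_len)
			continue;
		if (melem->mem_type == BFA_MEM_TYPE_KVA) {
			melem->kva = vmalloc(melem->mem_len);
		} else {	/* BFA_MEM_TYPE_DMA */
			melem->kva = dma_alloc_coherent(&pdev->dev,
					melem->mem_len, &pa, GFP_KERNEL);
			melem->dma = pa;
		}
		if (!melem->kva)
			return -ENOMEM;	/* caller unwinds earlier blocks */
	}
	return 0;
}
#endif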
/*
 * Use this function to attach the driver instance to the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in bfa_init() call)
 *
 * This call will fail if the cap is out of range compared to
 * pre-defined values within the BFA library.
 *
 * @param[out] bfa	Pointer to bfa_t.
 * @param[in] bfad	Opaque handle back to the driver's IOC structure
 * @param[in] cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in] pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_elem_s *melem;

	bfa->fcs = BFA_FALSE;

	bfa_assert((cfg != NULL) && (meminfo != NULL));

	/*
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

	bfa_com_port_attach(bfa, meminfo);
}
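
/*
 * Illustrative sketch (not part of the driver): the overall bring-up
 * order an OS driver follows around bfa_attach().  my_alloc_bfa_mem()
 * is the hypothetical helper sketched earlier; bfa_init() and
 * bfa_start() live elsewhere in the BFA library (see the comments on
 * bfa_iocfc_init()/bfa_iocfc_start() above).
 */
#if 0
static void
my_bfa_bringup(struct bfa_s *bfa, struct bfad_s *bfad,
	       struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
	       struct bfa_pcidev_s *pcidev)
{
	/* cfg and meminfo prepared as in the my_alloc_bfa_mem() sketch */
	bfa_attach(bfa, bfad, cfg, meminfo, pcidev);
	bfa_init(bfa);	/* kicks off HW initialization (IOC enable) */
	/* once init completes (bfa_cb_init), the driver calls bfa_start() */
}
#endif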
/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->detach(bfa);

	bfa_ioc_detach(&bfa->ioc);
}
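
/*
 * Illustrative sketch (not part of the driver): the teardown order
 * implied by the comment above -- stop the IOC, wait for the stop
 * completion (bfa_iocfc_stop_cb() completes bfad->comp), then detach
 * and free the memory blocks handed to bfa_attach().  Locking around
 * bfa_stop() is omitted here and is an assumption left to the driver.
 */
#if 0
static void
my_bfa_teardown(struct bfa_s *bfa, struct bfad_s *bfad)
{
	init_completion(&bfad->comp);
	bfa_stop(bfa);			/* signals bfad->comp when done */
	wait_for_completion(&bfad->comp);
	bfa_detach(bfa);
	/* now release the bfa_meminfo_s blocks allocated for bfa_attach() */
}
#endif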
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}

void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;

	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}

void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}
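
/*
 * Illustrative sketch (not part of the driver): draining queued
 * completion callbacks.  Dequeuing under the driver lock and running
 * the callbacks after dropping it is the assumed usage; the bfad_s
 * fields (bfad_lock, bfa) are assumptions for illustration.
 */
#if 0
static void
my_drain_completions(struct bfad_s *bfad)
{
	struct list_head doneq;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_comp_deq(&bfad->bfa, &doneq);	/* detach pending callbacks */
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq))
		bfa_comp_process(&bfad->bfa, &doneq); /* cbfn(cbarg, BFA_TRUE) */
}
#endif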
/*
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}

/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 * void
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}
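
/*
 * Illustrative sketch (not part of the driver): fetching the compiled-in
 * defaults and then overriding selected entries, as the doc block above
 * describes.  The override values are arbitrary examples.
 */
#if 0
static void
my_build_cfg(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = 512;		/* example override */
	cfg->drvcfg.ioc_recover = BFA_TRUE;	/* example override */
	/* cfg is now ready for bfa_cfg_get_meminfo()/bfa_attach() */
}
#endif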
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}