bfa_iocfc.c

/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <cs/bfa_debug.h>
#include <bfa_priv.h>
#include <log/bfa_log_hal.h>
#include <bfi/bfi_boot.h>
#include <bfi/bfi_cbreg.h>
#include <aen/bfa_aen_ioc.h>
#include <defs/bfa_defs_iocfc.h>
#include <defs/bfa_defs_pci.h>
#include "bfa_callback_priv.h"
#include "bfad_drv.h"

BFA_TRC_FILE(HAL, IOCFC);

/**
 * IOC local definitions
 */
#define BFA_IOCFC_TOV           5000    /* msecs */

enum {
        BFA_IOCFC_ACT_NONE      = 0,
        BFA_IOCFC_ACT_INIT      = 1,
        BFA_IOCFC_ACT_STOP      = 2,
        BFA_IOCFC_ACT_DISABLE   = 3,
};

/*
 * forward declarations
 */
static void     bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void     bfa_iocfc_disable_cbfn(void *bfa_arg);
static void     bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void     bfa_iocfc_reset_cbfn(void *bfa_arg);
static void     bfa_iocfc_stats_clear(void *bfa_arg);
static void     bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d,
                                     struct bfa_fw_stats_s *s);
static void     bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete);
static void     bfa_iocfc_stats_clr_timeout(void *bfa_arg);
static void     bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete);
static void     bfa_iocfc_stats_timeout(void *bfa_arg);

static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;

/**
 * bfa_ioc_pvt BFA IOC private functions
 */
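
/*
 * Compute the DMA memory needed by the request/response circular queues
 * and their shadow CI/PI pointers, and add it to *dm_len.
 */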
static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
        int             i, per_reqq_sz, per_rspq_sz;

        per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);
        per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);

        /*
         * Calculate CQ size
         */
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                *dm_len = *dm_len + per_reqq_sz;
                *dm_len = *dm_len + per_rspq_sz;
        }

        /*
         * Calculate Shadow CI/PI size
         */
        for (i = 0; i < cfg->fwcfg.num_cqs; i++)
                *dm_len += (2 * BFA_CACHELINE_SZ);
}
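
/*
 * Compute the DMA memory needed by the firmware configuration request,
 * the configuration response and the firmware statistics pages.
 */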
static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
        *dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s),
                               BFA_CACHELINE_SZ);
        *dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                               BFA_CACHELINE_SZ);
        *dm_len += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
}

/**
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfg_req_s cfg_req;
        struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
        struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
        int             i;

        bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
        bfa_trc(bfa, cfg->fwcfg.num_cqs);

        bfa_iocfc_reset_queues(bfa);

        /**
         * initialize IOC configuration info
         */
        cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
        cfg_info->num_cqs = cfg->fwcfg.num_cqs;

        bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
        bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);

        /**
         * dma map REQ and RSP circular queues and shadow pointers
         */
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
                                    iocfc->req_cq_ba[i].pa);
                bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
                                    iocfc->req_cq_shadow_ci[i].pa);
                cfg_info->req_cq_elems[i] =
                        bfa_os_htons(cfg->drvcfg.num_reqq_elems);

                bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
                                    iocfc->rsp_cq_ba[i].pa);
                bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
                                    iocfc->rsp_cq_shadow_pi[i].pa);
                cfg_info->rsp_cq_elems[i] =
                        bfa_os_htons(cfg->drvcfg.num_rspq_elems);
        }

        /**
         * Enable interrupt coalescing if it is driver init path
         * and not ioc disable/enable path.
         */
        if (!iocfc->cfgdone)
                cfg_info->intr_attr.coalesce = BFA_TRUE;

        iocfc->cfgdone = BFA_FALSE;

        /**
         * dma map IOC configuration itself
         */
        bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
                    bfa_lpuid(bfa));
        bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

        bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
                          sizeof(struct bfi_iocfc_cfg_req_s));
}
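
/*
 * Initialize the IOCFC control structure and hook up the chip-specific
 * register, queue-ack and MSI-X handlers (bfa_hwct_* for CT ASICs,
 * bfa_hwcb_* otherwise).
 */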
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                   struct bfa_pcidev_s *pcidev)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        bfa->bfad = bfad;
        iocfc->bfa = bfa;
        iocfc->action = BFA_IOCFC_ACT_NONE;

        bfa_os_assign(iocfc->cfg, *cfg);

        /**
         * Initialize chip specific handlers.
         */
        if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
                iocfc->hwif.hw_reginit = bfa_hwct_reginit;
                iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
                iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
                iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
                iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
                iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
        } else {
                iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
                iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
                iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
                iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
                iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
                iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
        }

        iocfc->hwif.hw_reginit(bfa);
        bfa->msix.nvecs = 0;
}
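
/*
 * Carve the DMA-able memory block out of 'meminfo': IOC memory first, then
 * the request/response queues, their shadow CI/PI areas, the config info
 * and response pages, the firmware statistics area and, optionally, the
 * firmware debug trace buffer.
 */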
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
                    struct bfa_meminfo_s *meminfo)
{
        u8             *dm_kva;
        u64             dm_pa;
        int             i, per_reqq_sz, per_rspq_sz;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        int             dbgsz;

        dm_kva = bfa_meminfo_dma_virt(meminfo);
        dm_pa = bfa_meminfo_dma_phys(meminfo);

        /*
         * First allocate dma memory for IOC.
         */
        bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
        dm_kva += bfa_ioc_meminfo();
        dm_pa += bfa_ioc_meminfo();

        /*
         * Claim DMA-able memory for the request/response queues and for shadow
         * ci/pi registers
         */
        per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);
        per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);

        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                iocfc->req_cq_ba[i].kva = dm_kva;
                iocfc->req_cq_ba[i].pa = dm_pa;
                bfa_os_memset(dm_kva, 0, per_reqq_sz);
                dm_kva += per_reqq_sz;
                dm_pa += per_reqq_sz;

                iocfc->rsp_cq_ba[i].kva = dm_kva;
                iocfc->rsp_cq_ba[i].pa = dm_pa;
                bfa_os_memset(dm_kva, 0, per_rspq_sz);
                dm_kva += per_rspq_sz;
                dm_pa += per_rspq_sz;
        }

        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                iocfc->req_cq_shadow_ci[i].kva = dm_kva;
                iocfc->req_cq_shadow_ci[i].pa = dm_pa;
                dm_kva += BFA_CACHELINE_SZ;
                dm_pa += BFA_CACHELINE_SZ;

                iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
                iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
                dm_kva += BFA_CACHELINE_SZ;
                dm_pa += BFA_CACHELINE_SZ;
        }

        /*
         * Claim DMA-able memory for the config info page
         */
        bfa->iocfc.cfg_info.kva = dm_kva;
        bfa->iocfc.cfg_info.pa = dm_pa;
        bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
        dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

        /*
         * Claim DMA-able memory for the config response
         */
        bfa->iocfc.cfgrsp_dma.kva = dm_kva;
        bfa->iocfc.cfgrsp_dma.pa = dm_pa;
        bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
        dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                              BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                             BFA_CACHELINE_SZ);

        /*
         * Claim DMA-able memory for iocfc stats
         */
        bfa->iocfc.stats_kva = dm_kva;
        bfa->iocfc.stats_pa = dm_pa;
        bfa->iocfc.fw_stats = (struct bfa_fw_stats_s *) dm_kva;
        dm_kva += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);

        bfa_meminfo_dma_virt(meminfo) = dm_kva;
        bfa_meminfo_dma_phys(meminfo) = dm_pa;

        dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
        if (dbgsz > 0) {
                bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
                bfa_meminfo_kva(meminfo) += dbgsz;
        }
}

/**
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
        int             i;

        bfa->rme_process = BFA_TRUE;

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->start(bfa);
}

/**
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
        int             i;

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->iocdisable(bfa);
}
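
/*
 * Completion callbacks for init/stop/disable, queued through bfa_cb_queue().
 * They run with 'complete'/'compl' set when the completion is delivered to
 * the driver, and cleared when the queued callback is only being flushed,
 * in which case the pending action is reset.
 */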
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
        struct bfa_s *bfa = bfa_arg;

        if (complete) {
                if (bfa->iocfc.cfgdone)
                        bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
                else
                        bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
        } else {
                if (bfa->iocfc.cfgdone)
                        bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
        }
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfad_s *bfad = bfa->bfad;

        if (compl)
                complete(&bfad->comp);
        else
                bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfad_s *bfad = bfa->bfad;

        if (compl)
                complete(&bfad->disable_comp);
}

/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
        struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

        fwcfg->num_cqs = fwcfg->num_cqs;
        fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
        fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
        fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
        fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
        fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);

        iocfc->cfgdone = BFA_TRUE;

        /**
         * Configuration is complete - initialize/start submodules
         */
        bfa_fcport_init(bfa);

        if (iocfc->action == BFA_IOCFC_ACT_INIT)
                bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
        else
                bfa_iocfc_start_submod(bfa);
}
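
/*
 * Send a clear-statistics request to the firmware over the mailbox and
 * arm a timeout so the caller is completed even if firmware never replies.
 */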
static void
bfa_iocfc_stats_clear(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_stats_req_s stats_req;

        bfa_timer_start(bfa, &iocfc->stats_timer,
                        bfa_iocfc_stats_clr_timeout, bfa,
                        BFA_IOCFC_TOV);

        bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CLEAR_STATS_REQ,
                    bfa_lpuid(bfa));
        bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
                          sizeof(struct bfi_iocfc_stats_req_s));
}
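
/*
 * Byte-swap the firmware statistics block, treated as an array of 32-bit
 * words, from wire (big-endian) order to host order.
 */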
static void
bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s)
{
        u32            *dip = (u32 *) d;
        u32            *sip = (u32 *) s;
        int             i;

        for (i = 0; i < (sizeof(struct bfa_fw_stats_s) / sizeof(u32)); i++)
                dip[i] = bfa_os_ntohl(sip[i]);
}
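
/*
 * Completion callbacks and timeout handlers for the get-stats and
 * clear-stats requests. A timeout marks the request with
 * BFA_STATUS_ETIMER and queues the corresponding completion.
 */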
static void
bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        if (complete) {
                bfa_ioc_clr_stats(&bfa->ioc);
                iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
        } else {
                iocfc->stats_busy = BFA_FALSE;
                iocfc->stats_status = BFA_STATUS_OK;
        }
}

static void
bfa_iocfc_stats_clr_timeout(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        bfa_trc(bfa, 0);

        iocfc->stats_status = BFA_STATUS_ETIMER;
        bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa);
}

static void
bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        if (complete) {
                if (iocfc->stats_status == BFA_STATUS_OK) {
                        bfa_os_memset(iocfc->stats_ret, 0,
                                      sizeof(*iocfc->stats_ret));
                        bfa_iocfc_stats_swap(&iocfc->stats_ret->fw_stats,
                                             iocfc->fw_stats);
                }
                iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
        } else {
                iocfc->stats_busy = BFA_FALSE;
                iocfc->stats_status = BFA_STATUS_OK;
        }
}

static void
bfa_iocfc_stats_timeout(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        bfa_trc(bfa, 0);

        iocfc->stats_status = BFA_STATUS_ETIMER;
        bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa);
}
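
/*
 * Send a get-statistics request to the firmware over the mailbox and arm
 * the query timeout.
 */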
static void
bfa_iocfc_stats_query(struct bfa_s *bfa)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_stats_req_s stats_req;

        bfa_timer_start(bfa, &iocfc->stats_timer,
                        bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV);

        bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ,
                    bfa_lpuid(bfa));
        bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
                          sizeof(struct bfi_iocfc_stats_req_s));
}
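
/*
 * Reset the consumer and producer indices of every request/response queue.
 */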
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
        int             q;

        for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
                bfa_reqq_ci(bfa, q) = 0;
                bfa_reqq_pi(bfa, q) = 0;
                bfa_rspq_ci(bfa, q) = 0;
                bfa_rspq_pi(bfa, q) = 0;
        }
}

/**
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
        struct bfa_s *bfa = bfa_arg;

        if (status != BFA_STATUS_OK) {
                bfa_isr_disable(bfa);
                if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
                        bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
                                     bfa_iocfc_init_cb, bfa);
                return;
        }

        bfa_iocfc_send_cfg(bfa);
}

/**
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;

        bfa_isr_disable(bfa);
        bfa_iocfc_disable_submod(bfa);

        if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
                bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
                             bfa);
        else {
                bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
                bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
                             bfa);
        }
}

/**
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;

        bfa->rme_process = BFA_FALSE;

        bfa_isr_disable(bfa);
        bfa_iocfc_disable_submod(bfa);

        if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
                bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
                             bfa);
}

/**
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;

        bfa_iocfc_reset_queues(bfa);
        bfa_isr_enable(bfa);
}

/**
 * bfa_ioc_public
 */

/**
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
                  u32 *dm_len)
{
        /* dma memory for IOC */
        *dm_len += bfa_ioc_meminfo();

        bfa_iocfc_fw_cfg_sz(cfg, dm_len);
        bfa_iocfc_cqs_sz(cfg, dm_len);
        *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
}

/**
 * Attach the IOCFC module: register IOC callbacks, attach and initialize
 * the IOC, claim memory, and set up timers and request-queue wait lists.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
        int             i;

        bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
        bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
        bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
        bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

        bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
                       bfa->trcmod, bfa->aen, bfa->logm);

        /**
         * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
         */
        if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
                bfa_ioc_set_fcmode(&bfa->ioc);

        bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
        bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
        bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
        bfa_iocfc_mem_claim(bfa, cfg, meminfo);
        bfa_timer_init(&bfa->timer_mod);

        INIT_LIST_HEAD(&bfa->comp_q);
        for (i = 0; i < BFI_IOC_MAX_CQS; i++)
                INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}

/**
 * Detach the IOCFC module by detaching the underlying IOC.
 */
void
bfa_iocfc_detach(struct bfa_s *bfa)
{
        bfa_ioc_detach(&bfa->ioc);
}

/**
 * Initialize IOCFC: enable the IOC, which kicks off firmware configuration.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
        bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
        bfa_ioc_enable(&bfa->ioc);
}

/**
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
        if (bfa->iocfc.cfgdone)
                bfa_iocfc_start_submod(bfa);
}

/**
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
        bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

        bfa->rme_process = BFA_FALSE;
        bfa_ioc_disable(&bfa->ioc);
}
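
/*
 * Handle IOCFC mailbox messages from the firmware: configuration replies,
 * get/clear statistics responses and update-queue responses.
 */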
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
        struct bfa_s *bfa = bfaarg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        union bfi_iocfc_i2h_msg_u *msg;

        msg = (union bfi_iocfc_i2h_msg_u *) m;
        bfa_trc(bfa, msg->mh.msg_id);

        switch (msg->mh.msg_id) {
        case BFI_IOCFC_I2H_CFG_REPLY:
                iocfc->cfg_reply = &msg->cfg_reply;
                bfa_iocfc_cfgrsp(bfa);
                break;

        case BFI_IOCFC_I2H_GET_STATS_RSP:
                if (iocfc->stats_busy == BFA_FALSE
                    || iocfc->stats_status == BFA_STATUS_ETIMER)
                        break;
                bfa_timer_stop(&iocfc->stats_timer);
                iocfc->stats_status = BFA_STATUS_OK;
                bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb,
                             bfa);
                break;

        case BFI_IOCFC_I2H_CLEAR_STATS_RSP:
                /*
                 * check for timer pop before processing the rsp
                 */
                if (iocfc->stats_busy == BFA_FALSE
                    || iocfc->stats_status == BFA_STATUS_ETIMER)
                        break;
                bfa_timer_stop(&iocfc->stats_timer);
                iocfc->stats_status = BFA_STATUS_OK;
                bfa_cb_queue(bfa, &iocfc->stats_hcb_qe,
                             bfa_iocfc_stats_clr_cb, bfa);
                break;

        case BFI_IOCFC_I2H_UPDATEQ_RSP:
                iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
                break;

        default:
                bfa_assert(0);
        }
}

#ifndef BFA_BIOS_BUILD
void
bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
{
        bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
}

u64
bfa_adapter_get_id(struct bfa_s *bfa)
{
        return bfa_ioc_get_adid(&bfa->ioc);
}
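
/*
 * Return the current IOCFC attributes: interrupt coalescing settings
 * (preferring driver-programmed values over the firmware-reported ones)
 * and the driver configuration.
 */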
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
        attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
                bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
                bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
        attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
                bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
                bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);

        attr->config = iocfc->cfg;
}
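
/*
 * Program interrupt coalescing attributes. The values are cached in the
 * config info page and, if the IOC is operational, also pushed to the
 * firmware through a BFI_IOCFC_H2I_SET_INTR_REQ message.
 */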
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_set_intr_req_s *m;

        iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
        iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
        iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);

        if (!bfa_iocfc_is_operational(bfa))
                return BFA_STATUS_OK;

        m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
        if (!m)
                return BFA_STATUS_DEVBUSY;

        bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
                    bfa_lpuid(bfa));
        m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
        m->delay = iocfc->cfginfo->intr_attr.delay;
        m->latency = iocfc->cfginfo->intr_attr.latency;

        bfa_trc(bfa, attr->delay);
        bfa_trc(bfa, attr->latency);

        bfa_reqq_produce(bfa, BFA_REQQ_IOC);
        return BFA_STATUS_OK;
}
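
/*
 * Record the sense-buffer length and DMA base address in the config info
 * page so the firmware can DMA SCSI sense data directly.
 */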
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
        bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
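
/*
 * Fetch firmware statistics. Returns BFA_STATUS_DEVBUSY if a stats request
 * is already in progress and BFA_STATUS_IOC_NON_OP if the IOC is not
 * operational; otherwise 'cbfn' is called on completion.
 */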
bfa_status_t
bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
                    bfa_cb_ioc_t cbfn, void *cbarg)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        if (iocfc->stats_busy) {
                bfa_trc(bfa, iocfc->stats_busy);
                return BFA_STATUS_DEVBUSY;
        }

        if (!bfa_iocfc_is_operational(bfa)) {
                bfa_trc(bfa, 0);
                return BFA_STATUS_IOC_NON_OP;
        }

        iocfc->stats_busy = BFA_TRUE;
        iocfc->stats_ret = stats;
        iocfc->stats_cbfn = cbfn;
        iocfc->stats_cbarg = cbarg;

        bfa_iocfc_stats_query(bfa);

        return BFA_STATUS_OK;
}
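
/*
 * Clear firmware statistics. Uses the same busy/operational checks as
 * bfa_iocfc_get_stats(); 'cbfn' is called once the clear completes.
 */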
bfa_status_t
bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        if (iocfc->stats_busy) {
                bfa_trc(bfa, iocfc->stats_busy);
                return BFA_STATUS_DEVBUSY;
        }

        if (!bfa_iocfc_is_operational(bfa)) {
                bfa_trc(bfa, 0);
                return BFA_STATUS_IOC_NON_OP;
        }

        iocfc->stats_busy = BFA_TRUE;
        iocfc->stats_cbfn = cbfn;
        iocfc->stats_cbarg = cbarg;

        bfa_iocfc_stats_clear(bfa);
        return BFA_STATUS_OK;
}

/**
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Enable");
        bfa_ioc_enable(&bfa->ioc);
}
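
/**
 * Disable the IOC: stop RME processing and request an IOC disable; the
 * completion is delivered through bfa_iocfc_disable_cb().
 */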
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Disable");
        bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

        bfa->rme_process = BFA_FALSE;
        bfa_ioc_disable(&bfa->ioc);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
        return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}

/**
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
        int             i;

        if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
                bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
                *nwwns = cfgrsp->pbc_cfg.nbluns;
                for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
                        wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

                return;
        }

        *nwwns = cfgrsp->bootwwns.nwwns;
        memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}
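
/**
 * Return the pre-boot (PBC) boot configuration reported by the firmware.
 */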
void
bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

        pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
        pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
        pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
        memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
}
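
/**
 * Copy the pre-boot (PBC) virtual port configuration and return the
 * number of PBC vports.
 */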
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

        memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
        return cfgrsp->pbc_cfg.nvports;
}
#endif