bfa_iocfc.c

/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <cs/bfa_debug.h>
#include <bfa_priv.h>
#include <log/bfa_log_hal.h>
#include <bfi/bfi_boot.h>
#include <bfi/bfi_cbreg.h>
#include <aen/bfa_aen_ioc.h>
#include <defs/bfa_defs_iocfc.h>
#include <defs/bfa_defs_pci.h>
#include "bfa_callback_priv.h"
#include "bfad_drv.h"

BFA_TRC_FILE(HAL, IOCFC);
/**
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
};
/*
 * forward declarations
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static void bfa_iocfc_stats_clear(void *bfa_arg);
static void bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d,
				 struct bfa_fw_stats_s *s);
static void bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stats_clr_timeout(void *bfa_arg);
static void bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stats_timeout(void *bfa_arg);

static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
/**
 * bfa_ioc_pvt BFA IOC private functions
 */

static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	int i, per_reqq_sz, per_rspq_sz;

	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	/*
	 * Calculate CQ size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		*dm_len = *dm_len + per_reqq_sz;
		*dm_len = *dm_len + per_rspq_sz;
	}

	/*
	 * Calculate Shadow CI/PI size
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++)
		*dm_len += (2 * BFA_CACHELINE_SZ);
}
static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s),
			       BFA_CACHELINE_SZ);
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			       BFA_CACHELINE_SZ);
	*dm_len += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
}
/**
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/**
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);

	/**
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			bfa_os_htons(cfg->drvcfg.num_rspq_elems);
	}

	/**
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	iocfc->cfgdone = BFA_FALSE;

	/**
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	bfa_os_assign(iocfc->cfg, *cfg);

	/**
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
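
/**
 * Claim DMA-able and kernel-virtual memory for the IOCFC module.
 *
 * The DMA block handed in through @meminfo is carved up in the same order
 * it was sized: IOC memory first, then the request/response circular
 * queues, the shadow CI/PI words, the config info page, the config
 * response page and the firmware statistics area. Any remaining KVA is
 * used for the optional IOC debug trace buffer.
 */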
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;
	int i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	int dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += bfa_ioc_meminfo();
	dm_pa += bfa_ioc_meminfo();

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		bfa_os_memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		bfa_os_memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for iocfc stats
	 */
	bfa->iocfc.stats_kva = dm_kva;
	bfa->iocfc.stats_pa = dm_pa;
	bfa->iocfc.fw_stats = (struct bfa_fw_stats_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}
/**
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int i;

	bfa->rme_process = BFA_TRUE;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}

/**
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
	int i;

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->iocdisable(bfa);
}
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;

	if (complete) {
		if (bfa->iocfc.cfgdone)
			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
		else
			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
	} else {
		if (bfa->iocfc.cfgdone)
			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
	}
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->comp);
	else
		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfad_s *bfad = bfa->bfad;

	if (compl)
		complete(&bfad->disable_comp);
}
/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;

	fwcfg->num_cqs = fwcfg->num_cqs;
	fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
	fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
	fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
	fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
	fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);

	iocfc->cfgdone = BFA_TRUE;

	/**
	 * Configuration is complete - initialize/start submodules
	 */
	bfa_fcport_init(bfa);

	if (iocfc->action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
	else
		bfa_iocfc_start_submod(bfa);
}
static void
bfa_iocfc_stats_clear(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_stats_req_s stats_req;

	bfa_timer_start(bfa, &iocfc->stats_timer,
			bfa_iocfc_stats_clr_timeout, bfa,
			BFA_IOCFC_TOV);

	bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CLEAR_STATS_REQ,
		    bfa_lpuid(bfa));
	bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
			  sizeof(struct bfi_iocfc_stats_req_s));
}
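
/**
 * Firmware statistics are reported in big-endian byte order; convert the
 * structure to host order one 32-bit word at a time.
 */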
static void
bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s)
{
	u32 *dip = (u32 *) d;
	u32 *sip = (u32 *) s;
	int i;

	for (i = 0; i < (sizeof(struct bfa_fw_stats_s) / sizeof(u32)); i++)
		dip[i] = bfa_os_ntohl(sip[i]);
}
static void
bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	if (complete) {
		bfa_ioc_clr_stats(&bfa->ioc);
		iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
	} else {
		iocfc->stats_busy = BFA_FALSE;
		iocfc->stats_status = BFA_STATUS_OK;
	}
}

static void
bfa_iocfc_stats_clr_timeout(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa_trc(bfa, 0);

	iocfc->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa);
}
static void
bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	if (complete) {
		if (iocfc->stats_status == BFA_STATUS_OK) {
			bfa_os_memset(iocfc->stats_ret, 0,
				      sizeof(*iocfc->stats_ret));
			bfa_iocfc_stats_swap(&iocfc->stats_ret->fw_stats,
					     iocfc->fw_stats);
		}
		iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
	} else {
		iocfc->stats_busy = BFA_FALSE;
		iocfc->stats_status = BFA_STATUS_OK;
	}
}

static void
bfa_iocfc_stats_timeout(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa_trc(bfa, 0);

	iocfc->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa);
}
static void
bfa_iocfc_stats_query(struct bfa_s *bfa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_stats_req_s stats_req;

	bfa_timer_start(bfa, &iocfc->stats_timer,
			bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV);

	bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ,
		    bfa_lpuid(bfa));
	bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
			  sizeof(struct bfi_iocfc_stats_req_s));
}

void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
	int q;

	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
		bfa_reqq_ci(bfa, q) = 0;
		bfa_reqq_pi(bfa, q) = 0;
		bfa_rspq_ci(bfa, q) = 0;
		bfa_rspq_pi(bfa, q) = 0;
	}
}
/**
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}
/**
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}
/**
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa->rme_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}

/**
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
/**
 * bfa_ioc_public
 */

/**
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		  u32 *dm_len)
{
	/* dma memory for IOC */
	*dm_len += bfa_ioc_meminfo();

	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
	bfa_iocfc_cqs_sz(cfg, dm_len);
	*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
}
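
/*
 * Usage sketch (illustrative only): the surrounding BFA core/driver glue
 * is expected to size memory first and only then attach and initialize
 * this module, roughly:
 *
 *	bfa_iocfc_meminfo(cfg, &km_len, &dm_len);
 *	... allocate km_len kernel-virtual and dm_len DMA-able bytes and
 *	    record them in a struct bfa_meminfo_s ...
 *	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);
 *	bfa_iocfc_init(bfa);	/- completion is reported via bfa_cb_init() -/
 *
 * The exact orchestration lives outside this file.
 */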
/**
 * Attach the IOCFC module: register IOC callbacks, set up chip-specific
 * handlers and claim the memory sized by bfa_iocfc_meminfo().
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;

	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
		       bfa->trcmod, bfa->aen, bfa->logm);

	/**
	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
	 */
	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
		bfa_ioc_set_fcmode(&bfa->ioc);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	bfa_timer_init(&bfa->timer_mod);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
/**
 * Detach the IOCFC module and the underlying IOC.
 */
void
bfa_iocfc_detach(struct bfa_s *bfa)
{
	bfa_ioc_detach(&bfa->ioc);
}
/**
 * Initialize the IOCFC module by enabling the IOC; the firmware
 * configuration is sent once the IOC enable completes.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}
/**
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}

/**
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
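
/**
 * Mailbox ISR for IOCFC-class firmware messages. Statistics responses are
 * ignored when no request is outstanding or when the request has already
 * been completed by the guard timer (stats_status == BFA_STATUS_ETIMER).
 */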
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		iocfc->cfg_reply = &msg->cfg_reply;
		bfa_iocfc_cfgrsp(bfa);
		break;

	case BFI_IOCFC_I2H_GET_STATS_RSP:
		if (iocfc->stats_busy == BFA_FALSE
		    || iocfc->stats_status == BFA_STATUS_ETIMER)
			break;
		bfa_timer_stop(&iocfc->stats_timer);
		iocfc->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb,
			     bfa);
		break;

	case BFI_IOCFC_I2H_CLEAR_STATS_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (iocfc->stats_busy == BFA_FALSE
		    || iocfc->stats_status == BFA_STATUS_ETIMER)
			break;
		bfa_timer_stop(&iocfc->stats_timer);
		iocfc->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(bfa, &iocfc->stats_hcb_qe,
			     bfa_iocfc_stats_clr_cb, bfa);
		break;

	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;

	default:
		bfa_assert(0);
	}
}
#ifndef BFA_BIOS_BUILD
void
bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
{
	bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
}

u64
bfa_adapter_get_id(struct bfa_s *bfa)
{
	return bfa_ioc_get_adid(&bfa->ioc);
}

void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
			bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
			bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
			bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
			bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
	iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
bfa_status_t
bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
		    bfa_cb_ioc_t cbfn, void *cbarg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	if (iocfc->stats_busy) {
		bfa_trc(bfa, iocfc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	if (!bfa_iocfc_is_operational(bfa)) {
		bfa_trc(bfa, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	iocfc->stats_busy = BFA_TRUE;
	iocfc->stats_ret = stats;
	iocfc->stats_cbfn = cbfn;
	iocfc->stats_cbarg = cbarg;

	bfa_iocfc_stats_query(bfa);
	return BFA_STATUS_OK;
}

bfa_status_t
bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	if (iocfc->stats_busy) {
		bfa_trc(bfa, iocfc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	if (!bfa_iocfc_is_operational(bfa)) {
		bfa_trc(bfa, 0);
		return BFA_STATUS_IOC_NON_OP;
	}

	iocfc->stats_busy = BFA_TRUE;
	iocfc->stats_cbfn = cbfn;
	iocfc->stats_cbarg = cbarg;

	bfa_iocfc_stats_clear(bfa);
	return BFA_STATUS_OK;
}
/**
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa_ioc_enable(&bfa->ioc);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}
/**
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

void
bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
}

int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}
#endif