bfa_iocfc.c

/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <cs/bfa_debug.h>
#include <bfa_priv.h>
#include <log/bfa_log_hal.h>
#include <bfi/bfi_boot.h>
#include <bfi/bfi_cbreg.h>
#include <aen/bfa_aen_ioc.h>
#include <defs/bfa_defs_iocfc.h>
#include <defs/bfa_defs_pci.h>
#include "bfa_callback_priv.h"
#include "bfad_drv.h"

BFA_TRC_FILE(HAL, IOCFC);
/**
 * IOC local definitions
 */
#define BFA_IOCFC_TOV           5000    /* msecs */

enum {
        BFA_IOCFC_ACT_NONE      = 0,
        BFA_IOCFC_ACT_INIT      = 1,
        BFA_IOCFC_ACT_STOP      = 2,
        BFA_IOCFC_ACT_DISABLE   = 3,
};

/*
 * forward declarations
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static void bfa_iocfc_stats_clear(void *bfa_arg);
static void bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d,
                                 struct bfa_fw_stats_s *s);
static void bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stats_clr_timeout(void *bfa_arg);
static void bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stats_timeout(void *bfa_arg);

static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;

/**
 * bfa_ioc_pvt BFA IOC private functions
 */
static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
        int i, per_reqq_sz, per_rspq_sz;

        per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);
        per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);

        /*
         * Calculate CQ size
         */
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                *dm_len = *dm_len + per_reqq_sz;
                *dm_len = *dm_len + per_rspq_sz;
        }

        /*
         * Calculate Shadow CI/PI size
         */
        for (i = 0; i < cfg->fwcfg.num_cqs; i++)
                *dm_len += (2 * BFA_CACHELINE_SZ);
}

static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
        *dm_len +=
                BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
        *dm_len +=
                BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                            BFA_CACHELINE_SZ);
        *dm_len += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
}
/**
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfg_req_s cfg_req;
        struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
        struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
        int i;

        bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
        bfa_trc(bfa, cfg->fwcfg.num_cqs);

        iocfc->cfgdone = BFA_FALSE;
        bfa_iocfc_reset_queues(bfa);

        /**
         * initialize IOC configuration info
         */
        cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
        cfg_info->num_cqs = cfg->fwcfg.num_cqs;

        bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
        bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);

        /**
         * dma map REQ and RSP circular queues and shadow pointers
         */
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
                                    iocfc->req_cq_ba[i].pa);
                bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
                                    iocfc->req_cq_shadow_ci[i].pa);
                cfg_info->req_cq_elems[i] =
                        bfa_os_htons(cfg->drvcfg.num_reqq_elems);

                bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
                                    iocfc->rsp_cq_ba[i].pa);
                bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
                                    iocfc->rsp_cq_shadow_pi[i].pa);
                cfg_info->rsp_cq_elems[i] =
                        bfa_os_htons(cfg->drvcfg.num_rspq_elems);
        }

        /**
         * dma map IOC configuration itself
         */
        bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
                    bfa_lpuid(bfa));
        bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

        bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
                          sizeof(struct bfi_iocfc_cfg_req_s));
}
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                   struct bfa_pcidev_s *pcidev)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        bfa->bfad = bfad;
        iocfc->bfa = bfa;
        iocfc->action = BFA_IOCFC_ACT_NONE;

        bfa_os_assign(iocfc->cfg, *cfg);

        /**
         * Initialize chip specific handlers.
         */
        if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) {
                iocfc->hwif.hw_reginit = bfa_hwct_reginit;
                iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
                iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
                iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
                iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
        } else {
                iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
                iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
                iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
                iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
                iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
        }

        iocfc->hwif.hw_reginit(bfa);
        bfa->msix.nvecs = 0;
}
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
                    struct bfa_meminfo_s *meminfo)
{
        u8 *dm_kva;
        u64 dm_pa;
        int i, per_reqq_sz, per_rspq_sz;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        int dbgsz;

        dm_kva = bfa_meminfo_dma_virt(meminfo);
        dm_pa = bfa_meminfo_dma_phys(meminfo);

        /*
         * First allocate dma memory for IOC.
         */
        bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
        dm_kva += bfa_ioc_meminfo();
        dm_pa += bfa_ioc_meminfo();

        /*
         * Claim DMA-able memory for the request/response queues and for shadow
         * ci/pi registers
         */
        per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);
        per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);

        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                iocfc->req_cq_ba[i].kva = dm_kva;
                iocfc->req_cq_ba[i].pa = dm_pa;
                bfa_os_memset(dm_kva, 0, per_reqq_sz);
                dm_kva += per_reqq_sz;
                dm_pa += per_reqq_sz;

                iocfc->rsp_cq_ba[i].kva = dm_kva;
                iocfc->rsp_cq_ba[i].pa = dm_pa;
                bfa_os_memset(dm_kva, 0, per_rspq_sz);
                dm_kva += per_rspq_sz;
                dm_pa += per_rspq_sz;
        }

        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                iocfc->req_cq_shadow_ci[i].kva = dm_kva;
                iocfc->req_cq_shadow_ci[i].pa = dm_pa;
                dm_kva += BFA_CACHELINE_SZ;
                dm_pa += BFA_CACHELINE_SZ;

                iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
                iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
                dm_kva += BFA_CACHELINE_SZ;
                dm_pa += BFA_CACHELINE_SZ;
        }

        /*
         * Claim DMA-able memory for the config info page
         */
        bfa->iocfc.cfg_info.kva = dm_kva;
        bfa->iocfc.cfg_info.pa = dm_pa;
        bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
        dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

        /*
         * Claim DMA-able memory for the config response
         */
        bfa->iocfc.cfgrsp_dma.kva = dm_kva;
        bfa->iocfc.cfgrsp_dma.pa = dm_pa;
        bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
        dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                              BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                             BFA_CACHELINE_SZ);

        /*
         * Claim DMA-able memory for iocfc stats
         */
        bfa->iocfc.stats_kva = dm_kva;
        bfa->iocfc.stats_pa = dm_pa;
        bfa->iocfc.fw_stats = (struct bfa_fw_stats_s *) dm_kva;
        dm_kva += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);

        bfa_meminfo_dma_virt(meminfo) = dm_kva;
        bfa_meminfo_dma_phys(meminfo) = dm_pa;

        dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
        if (dbgsz > 0) {
                bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
                bfa_meminfo_kva(meminfo) += dbgsz;
        }
}
/**
 * BFA submodules initialization completion notification.
 */
static void
bfa_iocfc_initdone_submod(struct bfa_s *bfa)
{
        int i;

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->initdone(bfa);
}

/**
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
        int i;

        bfa->rme_process = BFA_TRUE;

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->start(bfa);
}

/**
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
        int i;

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->iocdisable(bfa);
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
        struct bfa_s *bfa = bfa_arg;

        if (complete) {
                if (bfa->iocfc.cfgdone)
                        bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
                else
                        bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
        } else {
                if (bfa->iocfc.cfgdone)
                        bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
        }
}
static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfad_s *bfad = bfa->bfad;

        if (compl)
                complete(&bfad->comp);
        else
                bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfad_s *bfad = bfa->bfad;

        if (compl)
                complete(&bfad->disable_comp);
}

/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
        struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
        struct bfi_iocfc_cfg_s *cfginfo = iocfc->cfginfo;

        fwcfg->num_cqs = fwcfg->num_cqs;
        fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
        fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
        fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
        fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
        fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);

        cfginfo->intr_attr.coalesce = cfgrsp->intr_attr.coalesce;
        cfginfo->intr_attr.delay = bfa_os_ntohs(cfgrsp->intr_attr.delay);
        cfginfo->intr_attr.latency = bfa_os_ntohs(cfgrsp->intr_attr.latency);

        iocfc->cfgdone = BFA_TRUE;

        /**
         * Configuration is complete - initialize/start submodules
         */
        if (iocfc->action == BFA_IOCFC_ACT_INIT)
                bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
        else
                bfa_iocfc_start_submod(bfa);
}
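
/**
 * Ask firmware to clear IOCFC statistics. A timer guards against a lost
 * BFI_IOCFC_I2H_CLEAR_STATS_RSP reply.
 */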
static void
bfa_iocfc_stats_clear(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_stats_req_s stats_req;

        bfa_timer_start(bfa, &iocfc->stats_timer,
                        bfa_iocfc_stats_clr_timeout, bfa,
                        BFA_IOCFC_TOV);

        bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CLEAR_STATS_REQ,
                    bfa_lpuid(bfa));
        bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
                          sizeof(struct bfi_iocfc_stats_req_s));
}

static void
bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s)
{
        u32 *dip = (u32 *) d;
        u32 *sip = (u32 *) s;
        int i;

        for (i = 0; i < (sizeof(struct bfa_fw_stats_s) / sizeof(u32)); i++)
                dip[i] = bfa_os_ntohl(sip[i]);
}
static void
bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        if (complete) {
                bfa_ioc_clr_stats(&bfa->ioc);
                iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
        } else {
                iocfc->stats_busy = BFA_FALSE;
                iocfc->stats_status = BFA_STATUS_OK;
        }
}

static void
bfa_iocfc_stats_clr_timeout(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        bfa_trc(bfa, 0);

        iocfc->stats_status = BFA_STATUS_ETIMER;
        bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa);
}

static void
bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        if (complete) {
                if (iocfc->stats_status == BFA_STATUS_OK) {
                        bfa_os_memset(iocfc->stats_ret, 0,
                                      sizeof(*iocfc->stats_ret));
                        bfa_iocfc_stats_swap(&iocfc->stats_ret->fw_stats,
                                             iocfc->fw_stats);
                }
                iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
        } else {
                iocfc->stats_busy = BFA_FALSE;
                iocfc->stats_status = BFA_STATUS_OK;
        }
}

static void
bfa_iocfc_stats_timeout(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        bfa_trc(bfa, 0);

        iocfc->stats_status = BFA_STATUS_ETIMER;
        bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa);
}
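
/**
 * Ask firmware for the current IOCFC statistics. A timer guards against a
 * lost BFI_IOCFC_I2H_GET_STATS_RSP reply.
 */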
static void
bfa_iocfc_stats_query(struct bfa_s *bfa)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_stats_req_s stats_req;

        bfa_timer_start(bfa, &iocfc->stats_timer,
                        bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV);

        bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ,
                    bfa_lpuid(bfa));
        bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
                          sizeof(struct bfi_iocfc_stats_req_s));
}
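
/**
 * Reset the host-side consumer and producer indices of all request and
 * response queues.
 */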
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
        int q;

        for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
                bfa_reqq_ci(bfa, q) = 0;
                bfa_reqq_pi(bfa, q) = 0;
                bfa_rspq_ci(bfa, q) = 0;
                bfa_rspq_pi(bfa, q) = 0;
        }
}
/**
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
        struct bfa_s *bfa = bfa_arg;

        if (status != BFA_STATUS_OK) {
                bfa_isr_disable(bfa);
                if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
                        bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
                                     bfa_iocfc_init_cb, bfa);
                return;
        }

        bfa_iocfc_initdone_submod(bfa);
        bfa_iocfc_send_cfg(bfa);
}

/**
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;

        bfa_isr_disable(bfa);
        bfa_iocfc_disable_submod(bfa);

        if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
                bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
                             bfa);
        else {
                bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
                bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
                             bfa);
        }
}

/**
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;

        bfa->rme_process = BFA_FALSE;

        bfa_isr_disable(bfa);
        bfa_iocfc_disable_submod(bfa);

        if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
                bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
                             bfa);
}

/**
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;

        bfa_iocfc_reset_queues(bfa);
        bfa_isr_enable(bfa);
}
/**
 * bfa_ioc_public
 */

/**
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
                  u32 *dm_len)
{
        /* dma memory for IOC */
        *dm_len += bfa_ioc_meminfo();

        bfa_iocfc_fw_cfg_sz(cfg, dm_len);
        bfa_iocfc_cqs_sz(cfg, dm_len);
        *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
}

/**
 * Attach the IOCFC module: register IOC callbacks, set up the hardware
 * interface handlers and claim the memory needed by IOCFC.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
        int i;

        bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
        bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
        bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
        bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

        bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
                       bfa->trcmod, bfa->aen, bfa->logm);

        /**
         * Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode.
         */
        if (0)
                bfa_ioc_set_fcmode(&bfa->ioc);

        bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
        bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

        bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
        bfa_iocfc_mem_claim(bfa, cfg, meminfo);
        bfa_timer_init(&bfa->timer_mod);

        INIT_LIST_HEAD(&bfa->comp_q);
        for (i = 0; i < BFI_IOC_MAX_CQS; i++)
                INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}

/**
 * Detach the IOCFC module and release IOC resources.
 */
void
bfa_iocfc_detach(struct bfa_s *bfa)
{
        bfa_ioc_detach(&bfa->ioc);
}

/**
 * Start IOC initialization by enabling the IOC.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
        bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
        bfa_ioc_enable(&bfa->ioc);
}

/**
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
        if (bfa->iocfc.cfgdone)
                bfa_iocfc_start_submod(bfa);
}

/**
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
        bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

        bfa->rme_process = BFA_FALSE;
        bfa_ioc_disable(&bfa->ioc);
}
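
/**
 * Handle IOCFC firmware-to-host (I2H) mailbox messages: configuration
 * replies, statistics responses and queue-update responses.
 */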
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
        struct bfa_s *bfa = bfaarg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        union bfi_iocfc_i2h_msg_u *msg;

        msg = (union bfi_iocfc_i2h_msg_u *) m;
        bfa_trc(bfa, msg->mh.msg_id);

        switch (msg->mh.msg_id) {
        case BFI_IOCFC_I2H_CFG_REPLY:
                iocfc->cfg_reply = &msg->cfg_reply;
                bfa_iocfc_cfgrsp(bfa);
                break;

        case BFI_IOCFC_I2H_GET_STATS_RSP:
                if (iocfc->stats_busy == BFA_FALSE
                    || iocfc->stats_status == BFA_STATUS_ETIMER)
                        break;

                bfa_timer_stop(&iocfc->stats_timer);
                iocfc->stats_status = BFA_STATUS_OK;
                bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb,
                             bfa);
                break;

        case BFI_IOCFC_I2H_CLEAR_STATS_RSP:
                /*
                 * check for timer pop before processing the rsp
                 */
                if (iocfc->stats_busy == BFA_FALSE
                    || iocfc->stats_status == BFA_STATUS_ETIMER)
                        break;

                bfa_timer_stop(&iocfc->stats_timer);
                iocfc->stats_status = BFA_STATUS_OK;
                bfa_cb_queue(bfa, &iocfc->stats_hcb_qe,
                             bfa_iocfc_stats_clr_cb, bfa);
                break;

        case BFI_IOCFC_I2H_UPDATEQ_RSP:
                iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
                break;

        default:
                bfa_assert(0);
        }
}
#ifndef BFA_BIOS_BUILD
void
bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
{
        bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
}

u64
bfa_adapter_get_id(struct bfa_s *bfa)
{
        return bfa_ioc_get_adid(&bfa->ioc);
}

void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        attr->intr_attr = iocfc->cfginfo->intr_attr;
        attr->config = iocfc->cfg;
}
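
/**
 * Set interrupt coalescing attributes; the request is forwarded to firmware
 * only when the IOC is operational.
 */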
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_set_intr_req_s *m;

        iocfc->cfginfo->intr_attr = *attr;
        if (!bfa_iocfc_is_operational(bfa))
                return BFA_STATUS_OK;

        m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
        if (!m)
                return BFA_STATUS_DEVBUSY;

        bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
                    bfa_lpuid(bfa));
        m->coalesce = attr->coalesce;
        m->delay = bfa_os_htons(attr->delay);
        m->latency = bfa_os_htons(attr->latency);

        bfa_trc(bfa, attr->delay);
        bfa_trc(bfa, attr->latency);

        bfa_reqq_produce(bfa, BFA_REQQ_IOC);
        return BFA_STATUS_OK;
}

void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
        bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
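
/**
 * Fetch firmware statistics into the caller's buffer; completion is
 * reported through the supplied callback.
 */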
bfa_status_t
bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
                    bfa_cb_ioc_t cbfn, void *cbarg)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        if (iocfc->stats_busy) {
                bfa_trc(bfa, iocfc->stats_busy);
                return BFA_STATUS_DEVBUSY;
        }

        if (!bfa_iocfc_is_operational(bfa)) {
                bfa_trc(bfa, 0);
                return BFA_STATUS_IOC_NON_OP;
        }

        iocfc->stats_busy = BFA_TRUE;
        iocfc->stats_ret = stats;
        iocfc->stats_cbfn = cbfn;
        iocfc->stats_cbarg = cbarg;

        bfa_iocfc_stats_query(bfa);

        return BFA_STATUS_OK;
}

bfa_status_t
bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        if (iocfc->stats_busy) {
                bfa_trc(bfa, iocfc->stats_busy);
                return BFA_STATUS_DEVBUSY;
        }

        if (!bfa_iocfc_is_operational(bfa)) {
                bfa_trc(bfa, 0);
                return BFA_STATUS_IOC_NON_OP;
        }

        iocfc->stats_busy = BFA_TRUE;
        iocfc->stats_cbfn = cbfn;
        iocfc->stats_cbarg = cbarg;

        bfa_iocfc_stats_clear(bfa);
        return BFA_STATUS_OK;
}

/**
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Enable");
        bfa_ioc_enable(&bfa->ioc);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Disable");
        bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

        bfa->rme_process = BFA_FALSE;
        bfa_ioc_disable(&bfa->ioc);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
        return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}

/**
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t **wwns)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

        *nwwns = cfgrsp->bootwwns.nwwns;
        *wwns = cfgrsp->bootwwns.wwn;
}
#endif