bfa_ioc.c

/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <bfa.h>
#include <bfa_ioc.h>
#include <bfa_fwimg_priv.h>
#include <bfa_trcmod_priv.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
#include <aen/bfa_aen_ioc.h>
#include <aen/bfa_aen.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>

BFA_TRC_FILE(HAL, IOC);
/**
 * IOC local definitions
 */
#define BFA_IOC_TOV		2000	/* msecs */
#define BFA_IOC_HB_TOV		1000	/* msecs */
#define BFA_IOC_HB_FAIL_MAX	4
#define BFA_IOC_HWINIT_MAX	2
#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
#define BFA_IOC_TOV_RECOVER	(BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \
				 + BFA_IOC_TOV)

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	 (sizeof(struct bfa_trc_mod_s) -			\
	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
#define bfa_ioc_stats(_ioc, _stats)	((_ioc)->stats._stats++)

#define BFA_FLASH_CHUNK_NO(off)		((off) / BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_FLASH_OFFSET_IN_CHUNK(off)	((off) % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_FLASH_CHUNK_ADDR(chunkno)	((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
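
/*
 * Worked example for the chunk macros above (illustrative only; the real
 * chunk size comes from the bfi headers -- 64 words is just an assumed
 * value): for word offset off = 600 and BFI_FLASH_CHUNK_SZ_WORDS == 64,
 *
 *	BFA_FLASH_CHUNK_NO(600)		== 9	(600 / 64)
 *	BFA_FLASH_OFFSET_IN_CHUNK(600)	== 24	(600 % 64)
 *	BFA_FLASH_CHUNK_ADDR(9)		== 576	(9 * 64)
 *
 * i.e. word 600 of the image lives at word 24 of chunk 9, and that chunk
 * starts at word address 576.
 */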

bfa_boolean_t bfa_auto_recover = BFA_FALSE;

/*
 * forward declarations
 */
static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
			     enum bfa_ioc_aen_event event);
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);

/**
 * bfa_ioc_sm
 */

/**
 * IOC state machine events
 */
enum ioc_event {
	IOC_E_ENABLE = 1,	/* IOC enable request */
	IOC_E_DISABLE = 2,	/* IOC disable request */
	IOC_E_TIMEOUT = 3,	/* f/w response timeout */
	IOC_E_FWREADY = 4,	/* f/w initialization done */
	IOC_E_FWRSP_GETATTR = 5,	/* IOC get attribute response */
	IOC_E_FWRSP_ENABLE = 6,	/* enable f/w response */
	IOC_E_FWRSP_DISABLE = 7,	/* disable f/w response */
	IOC_E_HBFAIL = 8,	/* heartbeat failure */
	IOC_E_HWERROR = 9,	/* hardware error interrupt */
	IOC_E_SEMLOCKED = 10,	/* h/w semaphore is locked */
	IOC_E_DETACH = 11,	/* driver detach cleanup */
};

bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
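
/*
 * A minimal sketch of how the state machine declared above is driven
 * (illustrative; the bfa_fsm macros are defined in the cs/ framework
 * headers). Each state is a pair of functions -- an entry action and an
 * event handler -- and events are delivered to whichever handler is
 * current:
 *
 *	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);	// runs _entry action
 *	bfa_fsm_send_event(ioc, IOC_E_ENABLE);		// reset -> fwcheck
 *
 * ioc_sm_table[] maps each handler back to its external bfa_ioc_state
 * value so the current state can be reported outside the driver.
 */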

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	ioc->retry_count = 0;
	ioc->auto_recover = bfa_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0) {
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
	}
	ioc->retry_count++;
	bfa_ioc_timer_start(ioc);
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/*
		 * fall through
		 */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_SEMLOCKED:
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, BFA_FALSE);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
				      BFI_IOC_UNINIT);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * fall through
		 */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_HWERROR:
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL);

	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
		bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
		/*
		 * Wait for halt to take effect
		 */
		bfa_reg_read(ioc->ioc_regs.ll_halt);
	}

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover) {
		bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
				bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
	}
}

/**
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by other function.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * bfa_ioc_pvt BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *)qe;
		notify->cbfn(notify->cbarg);
	}
}

static void
bfa_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

static void
bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;
	int	cnt = 0;
#define BFA_SEM_SPINCNT	1000

	do {
		r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg);
		cnt++;
		if (cnt > BFA_SEM_SPINCNT)
			break;
	} while (r32 != 0);
	bfa_assert(cnt < BFA_SEM_SPINCNT);
}

static void
bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register.
	 */
	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
			ioc, BFA_IOC_TOV);
}

static void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->sem_timer);
}
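
/*
 * Putting the three helpers above together, the h/w semaphore protocol is
 * (a sketch; the register itself performs test-and-set on read):
 *
 *	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
 *	if (r32 == 0) {
 *		// owned: do f/w version check / h/w init, then release
 *		bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
 *	} else {
 *		// busy: retry from sem_timer after BFA_IOC_TOV msecs,
 *		// or bfa_ioc_hw_sem_get_cancel() on disable/detach
 *	}
 */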

/**
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);	/* i2c workaround 12.5khz clock */
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

/**
 * Get driver and firmware versions.
 */
static void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

static u32 *
bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
{
	if (ioc->ctdev)
		return bfi_image_ct_get_chunk(off);
	return bfi_image_cb_get_chunk(off);
}

static u32
bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc)
{
	return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size;
}

/**
 * Returns TRUE if same.
 */
static bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int	i;

	drv_fwhdr =
		(struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}

/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr =
		(struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (fwhdr.exec != drv_fwhdr->exec) {
		bfa_trc(ioc, fwhdr.exec);
		bfa_trc(ioc, drv_fwhdr->exec);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32	usecnt;
	struct bfi_ioc_image_hdr_s fwhdr;

	/**
	 * Firmware match check is relevant only for CNA.
	 */
	if (!ioc->cna)
		return BFA_TRUE;

	/**
	 * If bios boot (flash based) -- do not increment usage count
	 */
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		return BFA_TRUE;

	bfa_ioc_usage_sem_get(ioc);
	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);

	/**
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
		bfa_ioc_usage_sem_release(ioc);
		bfa_trc(ioc, usecnt);
		return BFA_TRUE;
	}

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
	bfa_trc(ioc, ioc_fwstate);

	/**
	 * The use count cannot be non-zero while the chip is in the
	 * uninitialized state.
	 */
	bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);

	/**
	 * Check if another driver with a different firmware is active
	 */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_ioc_usage_sem_release(ioc);
		bfa_trc(ioc, usecnt);
		return BFA_FALSE;
	}

	/**
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
	bfa_ioc_usage_sem_release(ioc);
	bfa_trc(ioc, usecnt);
	return BFA_TRUE;
}

static void
bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc)
{
	u32	usecnt;

	/**
	 * Firmware lock is relevant only for CNA.
	 * If bios boot (flash based) -- do not decrement usage count
	 */
	if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ))
		return;

	/**
	 * decrement usage count
	 */
	bfa_ioc_usage_sem_get(ioc);
	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
	bfa_assert(usecnt > 0);

	usecnt--;
	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
	bfa_trc(ioc, usecnt);

	bfa_ioc_usage_sem_release(ioc);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
}

static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_trc(ioc, ioc_fwstate);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		bfa_trc(ioc, ioc_fwstate);

		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32	*msgp = (u32 *) ioc_msg;
	u32	i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
			      bfa_os_wtole(msgp[i]));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
	(void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s *ioc = cbarg;
	u32	hb_count;

	hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		ioc->hb_fail++;
	} else {
		ioc->hb_count = hb_count;
		ioc->hb_fail = 0;
	}

	if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) {
		bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count);
		ioc->hb_fail = 0;
		bfa_ioc_recover(ioc);
		return;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
			BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_fail = 0;
	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
			BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
	bfa_timer_stop(&ioc->ioc_timer);
}
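
/*
 * Heartbeat timing, worked out from the constants at the top of the file:
 * the counter is sampled every BFA_IOC_HB_TOV (1000) msecs and failure is
 * declared after BFA_IOC_HB_FAIL_MAX (4) consecutive unchanged samples,
 * i.e. roughly 4 seconds of firmware silence. BFA_IOC_TOV_RECOVER is
 * sized to cover one full detection window plus an IOC timeout:
 *
 *	4 * 1000 + 2000 = 6000 msecs
 */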

/**
 * Host to LPU mailbox message addresses
 */
static struct {
	u32	hfn_mbox, lpu_mbox, hfn_pgn;
} iocreg_fnreg[] = {
	{HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0},
	{HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1},
	{HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2},
	{HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3}
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct {
	u32	hfn, lpu;
} iocreg_mbcmd_p0[] = {
	{HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT},
	{HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT},
	{HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT},
	{HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT}
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct {
	u32	hfn, lpu;
} iocreg_mbcmd_p1[] = {
	{HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT},
	{HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT},
	{HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT},
	{HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT}
};

/**
 * Shared IRQ handling in INTX mode
 */
static struct {
	u32	isr, msk;
} iocreg_shirq_next[] = {
	{HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK},
	{HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK},
	{HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK},
	{HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK},
};

static void
bfa_ioc_reg_init(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t	rb;
	int		pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
	} else {
		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
	}

	/**
	 * Shared IRQ handling in INTX mode
	 */
	ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr;
	ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk;

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);

	/**
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT)
		ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
}

/**
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32	*fwimg;
	u32	pgnum, pgoff;
	u32	loff = 0;
	u32	chunkno = 0;
	u32	i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	/**
	 * Flash based firmware boot
	 */
	bfa_trc(ioc, bfa_ioc_fwimg_get_size(ioc));
	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
		boot_type = BFI_BOOT_TYPE_FLASH;
	fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);
	fwimg[BFI_BOOT_TYPE_OFF / sizeof(u32)] = bfa_os_swap32(boot_type);
	fwimg[BFI_BOOT_PARAM_OFF / sizeof(u32)] =
		bfa_os_swap32(boot_param);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {
		if (BFA_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_FLASH_CHUNK_NO(i);
			fwimg = bfa_ioc_fwimg_get_chunk(ioc,
					BFA_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));
}
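
/*
 * The page-wrap arithmetic in the download loop, as a sketch: smem is
 * accessed through a sliding page window selected by host_page_num_fn, so
 * a linear offset is split into a page number and an in-page offset
 * (cf. bfa_ioc_smem_pgnum()/bfa_ioc_smem_pgoff() below). Whenever
 * PSS_SMEM_PGOFF() wraps loff back to 0, the loop bumps pgnum and
 * rewrites the page register before writing the next word.
 */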

static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	bfa_ioc_hwinit(ioc, force);
}

/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s *attr = ioc->attr;

	attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
	attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;
	u32	stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/**
 * Initialize IOC to port mapping.
 */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_map_port(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/**
	 * For crossbow, port id is same as pci function.
	 */
	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) {
		ioc->port_id = bfa_ioc_pcifn(ioc);
		return;
	}

	/**
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = bfa_reg_read(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
	bfa_trc(ioc, ioc->port_id);
}

/**
 * bfa_ioc_public
 */

/**
 * Set interrupt mode for a function: INTX or MSIX
 */
void
bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = bfa_reg_read(rb + FNC_PERS_REG);
	bfa_trc(ioc, r32);

	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/**
	 * If already in desired mode, do not change anything
	 */
	if (!msix && mode)
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	bfa_trc(ioc, r32);

	bfa_reg_write(rb + FNC_PERS_REG, r32);
}

bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
	u32	pll_sclk, pll_fclk, r32;

	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
		pll_sclk =
			__APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
			__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) |
			__APP_PLL_312_JITLMT0_1(3U) |
			__APP_PLL_312_CNTLMT0_1(1U);
		pll_fclk =
			__APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
			__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) |
			__APP_PLL_425_JITLMT0_1(3U) |
			__APP_PLL_425_CNTLMT0_1(1U);

		/**
		 * For catapult, choose operational mode FC/FCoE
		 */
		if (ioc->fcmode) {
			bfa_reg_write((rb + OP_MODE), 0);
			bfa_reg_write((rb + ETH_MAC_SER_REG),
				      __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2
				      | __APP_EMS_CHANNEL_SEL);
		} else {
			ioc->pllinit = BFA_TRUE;
			bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
			bfa_reg_write((rb + ETH_MAC_SER_REG),
				      __APP_EMS_REFCKBUFEN1);
		}
	} else {
		pll_sclk =
			__APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
			__APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) |
			__APP_PLL_312_CNTLMT0_1(3U);
		pll_fclk =
			__APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
			__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
			__APP_PLL_425_JITLMT0_1(3U) |
			__APP_PLL_425_CNTLMT0_1(3U);
	}

	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);

	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);

	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      __APP_PLL_425_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET);
	bfa_os_udelay(2);
	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      __APP_PLL_425_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
		      pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
		      pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET);

	/**
	 * Wait for PLLs to lock.
	 */
	bfa_os_udelay(2000);
	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);

	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);

	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
		bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
		bfa_os_udelay(1000);
		r32 = bfa_reg_read((rb + MBIST_STAT_REG));
		bfa_trc(ioc, r32);
	}

	return BFA_STATUS_OK;
}

/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
{
	bfa_os_addr_t	rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
	} else {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
	}

	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}

/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}

bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32	*msgp = mbmsg;
	u32	r32;
	int	i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = bfa_os_htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}

void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u *msg;

	msg = (union bfi_ioc_i2h_msg_u *)m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}
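
/*
 * A sketch of how the two halves fit together in the driver's interrupt
 * path (illustrative; the actual dispatch lives in the mailbox ISR glue):
 *
 *	struct bfi_mbmsg_s m;
 *
 *	bfa_ioc_msgget(ioc, &m);	// copy message, ack mbox interrupt
 *	if (m.mh.msg_class == BFI_MC_IOC)
 *		bfa_ioc_isr(ioc, &m);	// IOC class handled above
 *	else
 *		// route to the mbox_mod.mbhdlr[] handler registered
 *		// for that message class
 */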

/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	trcmod	kernel trace module
 * @param[in]	aen	kernel aen event module
 * @param[in]	logm	kernel logging module
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
	       struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->trcmod = trcmod;
	ioc->aen = aen;
	ioc->logm = logm;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}

/**
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/**
 * Setup IOC PCI properties.
 *
 * @param[in] pcidev PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
                 enum bfi_mclass mc)
{
        ioc->ioc_mc = mc;
        ioc->pcidev = *pcidev;
        ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
        ioc->cna = ioc->ctdev && !ioc->fcmode;

        bfa_ioc_map_port(ioc);
        bfa_ioc_reg_init(ioc);
}

/**
 * Initialize IOC dma memory
 *
 * @param[in] dm_kva kernel virtual address of IOC dma memory
 * @param[in] dm_pa  physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
        /**
         * dma memory for firmware attribute
         */
        ioc->attr_dma.kva = dm_kva;
        ioc->attr_dma.pa = dm_pa;
        ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
}

/**
 * Return size of dma memory required.
 */
u32
bfa_ioc_meminfo(void)
{
        return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
}
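
/*
 * Illustrative sketch only (not compiled): the expected bring-up ordering.
 * The caller sizes the dma area with bfa_ioc_meminfo(), allocates at least
 * that many dma-able bytes, claims them, and only then enables the IOC.
 * The bfad_example_* name and caller-supplied allocation are assumptions
 * for illustration, not part of this driver.
 */
#if 0
static void
bfad_example_ioc_bringup(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
        /* dm_kva/dm_pa must cover bfa_ioc_meminfo() bytes of dma memory */
        bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);
        bfa_ioc_enable(ioc);
}
#endif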

void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
        bfa_ioc_stats(ioc, ioc_enables);
        ioc->dbg_fwsave_once = BFA_TRUE;

        bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
        bfa_ioc_stats(ioc, ioc_disables);
        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/**
 * Return the number of bytes needed to save the firmware trace after a
 * crash. The driver allocates this much memory and should then call
 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to set up the
 * trace memory.
 */
int
bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
{
        return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/**
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
        bfa_assert(ioc->auto_recover);
        ioc->dbg_fwsave = dbg_fwsave;
        ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
}
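
/*
 * Illustrative sketch only (not compiled): setting up trace-save memory in
 * the order documented above. The bfad_example_* name and the kzalloc-based
 * allocation are assumptions for illustration.
 */
#if 0
static void *
bfad_example_trc_setup(struct bfa_ioc_s *ioc, bfa_boolean_t auto_recover)
{
        int len = bfa_ioc_debug_trcsz(auto_recover);
        void *fwsave = len ? kzalloc(len, GFP_KERNEL) : NULL;

        /* claim only if auto recovery is on and the allocation succeeded */
        if (fwsave)
                bfa_ioc_debug_memclaim(ioc, fwsave);
        return fwsave;
}
#endif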

u32
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
{
        return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

u32
bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
{
        return PSS_SMEM_PGOFF(fmaddr);
}

/**
 * Register mailbox message handler functions
 *
 * @param[in] ioc     IOC instance
 * @param[in] mcfuncs message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
        struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
        int mc;

        for (mc = 0; mc < BFI_MC_MAX; mc++)
                mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
                    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
        struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

        mod->mbhdlr[mc].cbfn = cbfn;
        mod->mbhdlr[mc].cbarg = cbarg;
}
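
/*
 * Illustrative sketch only (not compiled): a common module registering an
 * isr for its message class. The bfad_example_* names and the BFI_MC_CEE
 * class choice are assumptions for illustration.
 */
#if 0
static void
bfad_example_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
{
        /* dispatch on m->mh.msg_id here */
}

static void
bfad_example_cee_register(struct bfa_ioc_s *ioc, void *cee)
{
        bfa_ioc_mbox_regisr(ioc, BFI_MC_CEE, bfad_example_cee_isr, cee);
}
#endif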

/**
 * Queue a mailbox command request to firmware. If the mailbox is busy,
 * the command is queued and sent once the mailbox frees up. It is the
 * caller's responsibility to serialize requests.
 *
 * @param[in] ioc IOC instance
 * @param[in] cmd Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
        struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
        u32 stat;

        /**
         * If a previous command is pending, queue new command
         */
        if (!list_empty(&mod->cmd_q)) {
                list_add_tail(&cmd->qe, &mod->cmd_q);
                return;
        }

        /**
         * If mailbox is busy, queue command for poll timer
         */
        stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
        if (stat) {
                list_add_tail(&cmd->qe, &mod->cmd_q);
                return;
        }

        /**
         * mailbox is free -- queue command to firmware
         */
        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
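
/*
 * Illustrative sketch only (not compiled): building and queueing a mailbox
 * command. Checking bfa_ioc_is_operational() first, the bfad_example_*
 * name, and using the IOC message class are assumptions for illustration.
 */
#if 0
static void
bfad_example_queue_cmd(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd,
                       u8 msg_id)
{
        struct bfi_mhdr_s *mh = (struct bfi_mhdr_s *)cmd->msg;

        /* only queue commands while the IOC is operational */
        if (!bfa_ioc_is_operational(ioc))
                return;

        mh->msg_class = BFI_MC_IOC;
        mh->msg_id = msg_id;
        bfa_ioc_mbox_queue(ioc, cmd);
}
#endif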

/**
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
        struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
        struct bfi_mbmsg_s m;
        int mc;

        bfa_ioc_msgget(ioc, &m);

        /**
         * Treat IOC message class as special.
         */
        mc = m.mh.msg_class;
        if (mc == BFI_MC_IOC) {
                bfa_ioc_isr(ioc, &m);
                return;
        }

        /* mbhdlr[] has BFI_MC_MAX entries, so mc must be strictly less */
        if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
                return;

        mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}

void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

#ifndef BFA_BIOS_BUILD

/**
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
               || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/**
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
               || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
               || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch);
}

#define bfa_ioc_state_disabled(__sm)            \
        (((__sm) == BFI_IOC_UNINIT) ||          \
         ((__sm) == BFI_IOC_INITING) ||         \
         ((__sm) == BFI_IOC_HWINIT) ||          \
         ((__sm) == BFI_IOC_DISABLED) ||        \
         ((__sm) == BFI_IOC_HBFAIL) ||          \
         ((__sm) == BFI_IOC_CFG_DISABLED))

/**
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
        u32 ioc_state;
        bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;

        if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
                return BFA_FALSE;

        ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
        if (!bfa_ioc_state_disabled(ioc_state))
                return BFA_FALSE;

        ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
        if (!bfa_ioc_state_disabled(ioc_state))
                return BFA_FALSE;

        return BFA_TRUE;
}

/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules that need to act on an IOC heartbeat failure.
 */
void
bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
                        struct bfa_ioc_hbfail_notify_s *notify)
{
        list_add_tail(&notify->qe, &ioc->hb_notify_q);
}
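
/*
 * Illustrative sketch only (not compiled): a common module enlisting for
 * heartbeat-failure notification. The bfad_example_* names and the direct
 * initialization of the notify fields are assumptions for illustration.
 */
#if 0
static void
bfad_example_hbfail_cbfn(void *cbarg)
{
        /* clean up module state after an IOC heartbeat failure */
}

static void
bfad_example_hbfail_enlist(struct bfa_ioc_s *ioc,
                           struct bfa_ioc_hbfail_notify_s *notify, void *arg)
{
        notify->cbfn = bfad_example_hbfail_cbfn;
        notify->cbarg = arg;
        bfa_ioc_hbfail_register(ioc, notify);
}
#endif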

#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
                         struct bfa_adapter_attr_s *ad_attr)
{
        struct bfi_ioc_attr_s *ioc_attr;
        char model[BFA_ADAPTER_MODEL_NAME_LEN];

        ioc_attr = ioc->attr;
        bfa_os_memcpy((void *)&ad_attr->serial_num,
                      (void *)ioc_attr->brcd_serialnum,
                      BFA_ADAPTER_SERIAL_NUM_LEN);

        bfa_os_memcpy(&ad_attr->fw_ver, ioc_attr->fw_version, BFA_VERSION_LEN);
        bfa_os_memcpy(&ad_attr->optrom_ver, ioc_attr->optrom_version,
                      BFA_VERSION_LEN);
        bfa_os_memcpy(&ad_attr->manufacturer, BFA_MFG_NAME,
                      BFA_ADAPTER_MFG_NAME_LEN);

        bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
                      sizeof(struct bfa_mfg_vpd_s));

        ad_attr->nports = BFI_ADAPTER_GETP(NPORTS, ioc_attr->adapter_prop);
        ad_attr->max_speed = BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);

        /**
         * model name: the '?' placeholders below are patched with the
         * port count and, for the non-10G template, the link speed.
         */
        if (BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop) == 10) {
                strcpy(model, "BR-10?0");
                model[5] = '0' + ad_attr->nports;       /* e.g. BR-1020 */
        } else {
                strcpy(model, "Brocade-??5");
                model[8] =
                        '0' + BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
                model[9] = '0' + ad_attr->nports;       /* e.g. Brocade-825 */
        }

        if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
                ad_attr->prototype = 1;
        else
                ad_attr->prototype = 0;

        bfa_os_memcpy(&ad_attr->model, model, BFA_ADAPTER_MODEL_NAME_LEN);
        bfa_os_memcpy(&ad_attr->model_descr, &ad_attr->model,
                      BFA_ADAPTER_MODEL_NAME_LEN);

        ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
        ad_attr->mac = bfa_ioc_get_mac(ioc);

        ad_attr->pcie_gen = ioc_attr->pcie_gen;
        ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
        ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
        ad_attr->asic_rev = ioc_attr->asic_rev;

        ad_attr->hw_ver[0] = 'R';
        ad_attr->hw_ver[1] = 'e';
        ad_attr->hw_ver[2] = 'v';
        ad_attr->hw_ver[3] = '-';
        ad_attr->hw_ver[4] = ioc_attr->asic_rev;
        ad_attr->hw_ver[5] = '\0';

        ad_attr->cna_capable = ioc->cna;
}

void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
        bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

        ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
        ioc_attr->port_id = ioc->port_id;

        if (!ioc->ctdev)
                ioc_attr->ioc_type = BFA_IOC_TYPE_FC;
        else if (ioc->ioc_mc == BFI_MC_IOCFC)
                ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE;
        else if (ioc->ioc_mc == BFI_MC_LL)
                ioc_attr->ioc_type = BFA_IOC_TYPE_LL;

        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

        ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
        ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
        ioc_attr->pci_attr.chip_rev[0] = 'R';
        ioc_attr->pci_attr.chip_rev[1] = 'e';
        ioc_attr->pci_attr.chip_rev[2] = 'v';
        ioc_attr->pci_attr.chip_rev[3] = '-';
        ioc_attr->pci_attr.chip_rev[4] = ioc_attr->adapter_attr.asic_rev;
        ioc_attr->pci_attr.chip_rev[5] = '\0';
}

/**
 * hal_wwn_public
 */
wwn_t
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
{
        union {
                wwn_t wwn;
                u8 byte[sizeof(wwn_t)];
        } w;

        w.wwn = ioc->attr->mfg_wwn;

        if (bfa_ioc_portid(ioc) == 1)
                w.byte[7]++;

        return w.wwn;
}

wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
        union {
                wwn_t wwn;
                u8 byte[sizeof(wwn_t)];
        } w;

        w.wwn = ioc->attr->mfg_wwn;

        if (bfa_ioc_portid(ioc) == 1)
                w.byte[7]++;

        w.byte[0] = 0x20;
        return w.wwn;
}

wwn_t
bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst)
{
        union {
                wwn_t wwn;
                u8 byte[sizeof(wwn_t)];
        } w, w5;

        bfa_trc(ioc, inst);

        w.wwn = ioc->attr->mfg_wwn;
        w5.byte[0] = 0x50 | w.byte[2] >> 4;
        w5.byte[1] = w.byte[2] << 4 | w.byte[3] >> 4;
        w5.byte[2] = w.byte[3] << 4 | w.byte[4] >> 4;
        w5.byte[3] = w.byte[4] << 4 | w.byte[5] >> 4;
        w5.byte[4] = w.byte[5] << 4 | w.byte[6] >> 4;
        w5.byte[5] = w.byte[6] << 4 | w.byte[7] >> 4;
        w5.byte[6] = w.byte[7] << 4 | (inst & 0x0f00) >> 8;
        w5.byte[7] = (inst & 0xff);

        return w5.wwn;
}
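
/*
 * Worked example of the NAA-5 packing above (illustrative input values):
 * the low six bytes of the manufacturing WWN are packed, nibble-shifted,
 * between a leading NAA type-5 nibble and a 12-bit inst suffix.
 *
 *   mfg_wwn = 20:00:00:05:1e:8b:cd:ef, inst = 0x123
 *   result  = 50:00:51:e8:bc:de:f1:23
 */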

u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
        return ioc->attr->mfg_wwn;
}

mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
        mac_t mac;

        mac = ioc->attr->mfg_mac;
        mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);

        return mac;
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
        ioc->fcmode = BFA_TRUE;
        ioc->port_id = bfa_ioc_pcifn(ioc);
}

bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
        return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT);
}

/**
 * Return true if interrupt should be claimed.
 */
bfa_boolean_t
bfa_ioc_intx_claim(struct bfa_ioc_s *ioc)
{
        u32 isr, msk;

        /**
         * Always claim if not catapult.
         */
        if (!ioc->ctdev)
                return BFA_TRUE;

        /**
         * FALSE if next device is claiming interrupt.
         * TRUE if next device is not interrupting or not present.
         */
        msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next);
        isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next);
        return !(isr & ~msk);
}
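
/*
 * Illustrative sketch only (not compiled): an INTx handler using
 * bfa_ioc_intx_claim() to decide whether the interrupt belongs to this
 * function before servicing it. The bfad_example_* name and the reduced
 * servicing (mailbox only) are assumptions for illustration.
 */
#if 0
static irqreturn_t
bfad_example_intx(int irq, void *dev_id)
{
        struct bfa_ioc_s *ioc = dev_id;

        /* on catapult, let the next pci function claim the interrupt */
        if (!bfa_ioc_intx_claim(ioc))
                return IRQ_NONE;

        bfa_ioc_mbox_isr(ioc);
        return IRQ_HANDLED;
}
#endif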

/**
 * Send AEN notification
 */
static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
        union bfa_aen_data_u aen_data;
        struct bfa_log_mod_s *logmod = ioc->logm;
        s32 inst_num = 0;
        struct bfa_ioc_attr_s ioc_attr;

        switch (event) {
        case BFA_IOC_AEN_HBGOOD:
                bfa_log(logmod, BFA_AEN_IOC_HBGOOD, inst_num);
                break;
        case BFA_IOC_AEN_HBFAIL:
                bfa_log(logmod, BFA_AEN_IOC_HBFAIL, inst_num);
                break;
        case BFA_IOC_AEN_ENABLE:
                bfa_log(logmod, BFA_AEN_IOC_ENABLE, inst_num);
                break;
        case BFA_IOC_AEN_DISABLE:
                bfa_log(logmod, BFA_AEN_IOC_DISABLE, inst_num);
                break;
        case BFA_IOC_AEN_FWMISMATCH:
                bfa_log(logmod, BFA_AEN_IOC_FWMISMATCH, inst_num);
                break;
        default:
                break;
        }

        memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
        memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
        bfa_ioc_get_attr(ioc, &ioc_attr);
        switch (ioc_attr.ioc_type) {
        case BFA_IOC_TYPE_FC:
                aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
                break;
        case BFA_IOC_TYPE_FCoE:
                aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
                aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
                break;
        case BFA_IOC_TYPE_LL:
                aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
                break;
        default:
                bfa_assert(ioc_attr.ioc_type == BFA_IOC_TYPE_FC);
                break;
        }
        aen_data.ioc.ioc_type = ioc_attr.ioc_type;

        /* post the event via the driver AEN interface (signature assumed) */
        bfa_aen_post(ioc->aen, BFA_AEN_CAT_IOC, event, &aen_data);
}

/**
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
        int tlen;

        if (ioc->dbg_fwsave_len == 0)
                return BFA_STATUS_ENOFSAVE;

        tlen = *trclen;
        if (tlen > ioc->dbg_fwsave_len)
                tlen = ioc->dbg_fwsave_len;

        bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
        *trclen = tlen;
        return BFA_STATUS_OK;
}
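
/*
 * Illustrative sketch only (not compiled): copying out the saved trace
 * after a failure. The bfad_example_* name and buffer handling are
 * assumptions for illustration.
 */
#if 0
static int
bfad_example_collect_fwsave(struct bfa_ioc_s *ioc, void *buf, int buflen)
{
        int len = buflen;

        /* on success, len is updated to the number of bytes copied */
        if (bfa_ioc_debug_fwsave(ioc, buf, &len) != BFA_STATUS_OK)
                return 0;
        return len;
}
#endif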

/**
 * Read the current firmware trace from IOC shared memory.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
        u32 pgnum;
        u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
        int i, tlen;
        u32 *tbuf = trcdata, r32;

        bfa_trc(ioc, *trclen);

        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
        loff = bfa_ioc_smem_pgoff(ioc, loff);
        bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

        tlen = *trclen;
        if (tlen > BFA_DBG_FWTRC_LEN)
                tlen = BFA_DBG_FWTRC_LEN;
        tlen /= sizeof(u32);

        bfa_trc(ioc, tlen);

        for (i = 0; i < tlen; i++) {
                r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
                tbuf[i] = bfa_os_ntohl(r32);
                loff += sizeof(u32);

                /**
                 * handle page offset wrap around
                 */
                loff = PSS_SMEM_PGOFF(loff);
                if (loff == 0) {
                        pgnum++;
                        bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
                }
        }
        bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
                      bfa_ioc_smem_pgnum(ioc, 0));
        bfa_trc(ioc, pgnum);

        *trclen = tlen * sizeof(u32);
        return BFA_STATUS_OK;
}

/**
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
{
        int tlen;

        if (ioc->dbg_fwsave_len) {
                tlen = ioc->dbg_fwsave_len;
                bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
        }
}

/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
        if (ioc->dbg_fwsave_once) {
                ioc->dbg_fwsave_once = BFA_FALSE;
                bfa_ioc_debug_save(ioc);
        }

        bfa_ioc_stats(ioc, ioc_hbfails);
        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

#else

static void
bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
{
}

static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
        bfa_assert(0);
}
#endif