bfa_ioc.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658
  1. /*
  2. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include "bfad_drv.h"
  18. #include "bfa_ioc.h"
  19. #include "bfi_ctreg.h"
  20. #include "bfa_defs.h"
  21. #include "bfa_defs_svc.h"
  22. BFA_TRC_FILE(CNA, IOC);
/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	2	/* max h/w init attempts before giving up */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

/* Arm the IOC state-machine timer; fires bfa_ioc_timeout after BFA_IOC_TOV. */
#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

/* Heartbeat timer; fires bfa_ioc_hb_check after BFA_IOC_HB_TOV msecs. */
#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

/* Offset of the per-function (_fn) firmware trace buffer. */
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 * Each dispatches through the per-ASIC hardware interface table (ioc_hwif).
 */
#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))

#ifdef BFA_IOC_IS_UEFI
#define bfa_ioc_is_bios_optrom(__ioc) (0)
#define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI
#else
/*
 * An image smaller than BFA_IOC_FWIMG_MINSZ is taken to be a BIOS
 * option-ROM environment rather than a full firmware image.
 */
#define bfa_ioc_is_bios_optrom(__ioc)	\
	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
#define bfa_ioc_is_uefi(__ioc) (0)
#endif

/* Non-zero if a mbox command is queued, or one is still posted to h/w. */
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) ||	\
	readl((__ioc)->ioc_regs.hfn_mbox_cmd))
/* Module-wide policy: attempt automatic IOC recovery after failures. */
bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request */
	IOC_E_ENABLE		= 2,	/* IOC enable request */
	IOC_E_DISABLE		= 3,	/* IOC disable request */
	IOC_E_DETACH		= 4,	/* driver detach cleanup */
	IOC_E_ENABLED		= 5,	/* f/w enabled */
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response */
	IOC_E_DISABLED		= 7,	/* f/w disabled */
	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm */
	IOC_E_HBFAIL		= 9,	/* heartbeat failure */
	IOC_E_HWERROR		= 10,	/* hardware error interrupt */
	IOC_E_TIMEOUT		= 11,	/* timeout */
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

/* Maps each IOC state-machine handler to its externally visible state. */
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/*
 * IOCPF state machine definitions/declarations
 */

/*
 * NOTE: the IOCPF timers below share ioc_timer with the IOC state machine;
 * the IOC and IOCPF state machines never run their timers concurrently.
 */
#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_recovery_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)

/* Poll timer used while waiting to acquire the h/w semaphore. */
#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declareations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request */
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request */
	IOCPF_E_STOP		= 3,	/* stop on driver detach */
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done */
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response */
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response */
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm */
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm */
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm */
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked */
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout */
};
/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

/*
 * IOCPF handler-to-state map. Note the many-to-one entries: both fwcheck
 * and mismatch report BFA_IOCPF_FWMISMATCH, and both hwinit and enabling
 * report BFA_IOCPF_HWINIT.
 */
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
	/* No entry actions for the uninit state. */
}
  201. /*
  202. * IOC is in uninit state.
  203. */
  204. static void
  205. bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
  206. {
  207. bfa_trc(ioc, event);
  208. switch (event) {
  209. case IOC_E_RESET:
  210. bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
  211. break;
  212. default:
  213. bfa_sm_fault(ioc, event);
  214. }
  215. }
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	/* Bring the companion IOCPF state machine back to its reset state. */
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
  224. /*
  225. * IOC is in reset state.
  226. */
  227. static void
  228. bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
  229. {
  230. bfa_trc(ioc, event);
  231. switch (event) {
  232. case IOC_E_ENABLE:
  233. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  234. break;
  235. case IOC_E_DISABLE:
  236. bfa_ioc_disable_comp(ioc);
  237. break;
  238. case IOC_E_DETACH:
  239. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  240. break;
  241. default:
  242. bfa_sm_fault(ioc, event);
  243. }
  244. }
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	/* Kick off the IOCPF state machine to do the actual h/w enable. */
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		/* On PFFAILED the iocpf already knows about its failure. */
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		/* Duplicate enable request -- ignore. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	/* Arm the response timer before issuing the getattr mbox command. */
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}
  289. /*
  290. * IOC configuration in progress. Timer is active.
  291. */
  292. static void
  293. bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
  294. {
  295. bfa_trc(ioc, event);
  296. switch (event) {
  297. case IOC_E_FWRSP_GETATTR:
  298. bfa_ioc_timer_stop(ioc);
  299. bfa_ioc_check_attr_wwns(ioc);
  300. bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
  301. break;
  302. break;
  303. case IOC_E_PFFAILED:
  304. case IOC_E_HWERROR:
  305. bfa_ioc_timer_stop(ioc);
  306. /* !!! fall through !!! */
  307. case IOC_E_TIMEOUT:
  308. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  309. bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
  310. if (event != IOC_E_PFFAILED)
  311. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
  312. break;
  313. case IOC_E_DISABLE:
  314. bfa_ioc_timer_stop(ioc);
  315. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  316. break;
  317. case IOC_E_ENABLE:
  318. break;
  319. default:
  320. bfa_sm_fault(ioc, event);
  321. }
  322. }
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/* Report enable completion and start heartbeat monitoring. */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
}
/*
 * Operational state handler: IOC is up and heartbeating.
 */
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* Already enabled -- ignore. */
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		bfa_ioc_fail_notify(ioc);
		/* Retry automatically only if auto-recovery is enabled. */
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		/* On PFFAILED the iocpf already knows about its failure. */
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/* Ask the IOCPF state machine to perform the disable. */
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
}
  366. /*
  367. * IOC is being disabled
  368. */
  369. static void
  370. bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
  371. {
  372. bfa_trc(ioc, event);
  373. switch (event) {
  374. case IOC_E_DISABLED:
  375. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
  376. break;
  377. case IOC_E_HWERROR:
  378. /*
  379. * No state change. Will move to disabled state
  380. * after iocpf sm completes failure processing and
  381. * moves to disabled state.
  382. */
  383. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
  384. break;
  385. default:
  386. bfa_sm_fault(ioc, event);
  387. }
  388. }
/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}
  397. static void
  398. bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
  399. {
  400. bfa_trc(ioc, event);
  401. switch (event) {
  402. case IOC_E_ENABLE:
  403. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  404. break;
  405. case IOC_E_DISABLE:
  406. ioc->cbfn->disable_cbfn(ioc->bfa);
  407. break;
  408. case IOC_E_DETACH:
  409. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  410. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  411. break;
  412. default:
  413. bfa_sm_fault(ioc, event);
  414. }
  415. }
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	/* Trace only; the iocpf sm drives the actual retry. */
	bfa_trc(ioc, 0);
}
/*
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		/* Retry succeeded -- continue with attribute fetch. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		/* On PFFAILED the iocpf already knows about its failure. */
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_ENABLE:
		/* Duplicate enable request -- ignore. */
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	/* Trace only; terminal failure state until disabled/re-enabled. */
	bfa_trc(ioc, 0);
}
  459. /*
  460. * IOC failure.
  461. */
  462. static void
  463. bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
  464. {
  465. bfa_trc(ioc, event);
  466. switch (event) {
  467. case IOC_E_ENABLE:
  468. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  469. break;
  470. case IOC_E_DISABLE:
  471. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  472. break;
  473. case IOC_E_HWERROR:
  474. /*
  475. * HB failure notification, ignore.
  476. */
  477. break;
  478. default:
  479. bfa_sm_fault(ioc, event);
  480. }
  481. }
/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->retry_count = 0;
	/* Latch the module-wide auto-recovery policy for this instance. */
	iocpf->auto_recover = bfa_auto_recover;
}
  494. /*
  495. * Beginning state. IOC is in reset state.
  496. */
  497. static void
  498. bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  499. {
  500. struct bfa_ioc_s *ioc = iocpf->ioc;
  501. bfa_trc(ioc, event);
  502. switch (event) {
  503. case IOCPF_E_ENABLE:
  504. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
  505. break;
  506. case IOCPF_E_STOP:
  507. break;
  508. default:
  509. bfa_sm_fault(ioc, event);
  510. }
  511. }
/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	/* Asynchronous acquire; IOCPF_E_SEMLOCKED arrives on success. */
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			/* Firmware version matches -- proceed to h/w init. */
			iocpf->retry_count = 0;
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			/* Mismatch: release the h/w semaphore and wait. */
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->retry_count++;
	/* Re-check the firmware version after the timer expires. */
	bfa_iocpf_timer_start(iocpf->ioc);
}
  565. /*
  566. * Awaiting firmware version match.
  567. */
  568. static void
  569. bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  570. {
  571. struct bfa_ioc_s *ioc = iocpf->ioc;
  572. bfa_trc(ioc, event);
  573. switch (event) {
  574. case IOCPF_E_TIMEOUT:
  575. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
  576. break;
  577. case IOCPF_E_DISABLE:
  578. bfa_iocpf_timer_stop(ioc);
  579. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  580. bfa_fsm_send_event(ioc, IOC_E_DISABLED);
  581. break;
  582. case IOCPF_E_STOP:
  583. bfa_iocpf_timer_stop(ioc);
  584. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  585. break;
  586. default:
  587. bfa_sm_fault(ioc, event);
  588. }
  589. }
/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	/* Asynchronous acquire; IOCPF_E_SEMLOCKED arrives on success. */
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
  598. /*
  599. * Awaiting semaphore for h/w initialzation.
  600. */
  601. static void
  602. bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  603. {
  604. struct bfa_ioc_s *ioc = iocpf->ioc;
  605. bfa_trc(ioc, event);
  606. switch (event) {
  607. case IOCPF_E_SEMLOCKED:
  608. iocpf->retry_count = 0;
  609. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
  610. break;
  611. case IOCPF_E_DISABLE:
  612. bfa_sem_timer_stop(ioc);
  613. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
  614. break;
  615. default:
  616. bfa_sm_fault(ioc, event);
  617. }
  618. }
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	/* Arm the init timeout, then start a (non-forced) h/w init. */
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}
/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		/* Retry a forced h/w init up to BFA_IOC_HWINIT_MAX times. */
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_iocpf_timer_start(ioc);
			bfa_ioc_hwinit(ioc, BFA_TRUE);
			break;
		}
		/* Out of retries: release the h/w semaphore and fail. */
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		/* INITFAIL came from the ioc sm; only report on timeout. */
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		break;

	case IOCPF_E_DISABLE:
		/* Release the h/w semaphore before going disabled. */
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  665. static void
  666. bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
  667. {
  668. bfa_iocpf_timer_start(iocpf->ioc);
  669. bfa_ioc_send_enable(iocpf->ioc);
  670. }
/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		/* Enable acked -- release the semaphore and go ready. */
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		/* Retry from h/w init; semaphore is still held across retries. */
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			break;
		}

		/* Out of retries: release the semaphore and fail. */
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		/* On timeout, also report the PF failure to the IOC FSM. */
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_FWREADY:
		/* Firmware (re)announced readiness -- resend the enable request. */
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  715. static void
  716. bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
  717. {
  718. bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
  719. }
/*
 * IOCPF is up and operational; waiting for disable or failure events.
 */
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		break;

	case IOCPF_E_FWREADY:
		/*
		 * Unexpected firmware-ready while already up: treat as a
		 * failure. Which failure state depends on whether the IOC
		 * itself had reached operational state.
		 */
		if (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op))
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		else
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  746. static void
  747. bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
  748. {
  749. bfa_iocpf_timer_start(iocpf->ioc);
  750. bfa_ioc_send_disable(iocpf->ioc);
  751. }
/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		/* Firmware acknowledged (or restarted); disable is complete. */
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		/* Force the f/w state to FAIL and treat the disable as done. */
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		/* Stale enable response -- ignore while disabling. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  781. /*
  782. * IOC disable completion entry.
  783. */
  784. static void
  785. bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
  786. {
  787. bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
  788. }
/*
 * IOCPF is disabled; waiting for a re-enable or a final stop.
 */
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		/* Re-enable starts over with semaphore acquisition. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		/* Final teardown: drop the firmware lock and reset. */
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  806. static void
  807. bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
  808. {
  809. bfa_iocpf_timer_start(iocpf->ioc);
  810. }
/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		/* Final teardown: drop the firmware lock and reset. */
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_TIMEOUT:
		/* Retry timer fired -- attempt initialization again. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Entry action on IOCPF failure. The ordering below matters: the LPU is
 * stopped and the failure is recorded in h/w before peers and pending
 * mailbox users are notified.
 */
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);
	writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);

	/*
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	/* Arm the recovery timer only if auto-recovery is configured. */
	if (iocpf->auto_recover)
		bfa_iocpf_recovery_timer_start(iocpf->ioc);
}
/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		/* The recovery timer only runs when auto-recovery is on. */
		if (iocpf->auto_recover)
			bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_TIMEOUT:
		/* Recovery timer fired -- retry initialization. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  876. /*
  877. * BFA IOC private functions
  878. */
/*
 * Complete an IOC disable: invoke the driver callback, then notify all
 * registered listeners.
 */
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/*
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}
}
  893. bfa_boolean_t
  894. bfa_ioc_sem_get(void __iomem *sem_reg)
  895. {
  896. u32 r32;
  897. int cnt = 0;
  898. #define BFA_SEM_SPINCNT 3000
  899. r32 = readl(sem_reg);
  900. while (r32 && (cnt < BFA_SEM_SPINCNT)) {
  901. cnt++;
  902. udelay(2);
  903. r32 = readl(sem_reg);
  904. }
  905. if (r32 == 0)
  906. return BFA_TRUE;
  907. bfa_assert(cnt < BFA_SEM_SPINCNT);
  908. return BFA_FALSE;
  909. }
/*
 * Try to acquire the h/w init semaphore; on success drive the IOCPF state
 * machine forward, otherwise arm the semaphore timer to poll again.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32 r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	/* Busy -- retry when the semaphore timer fires. */
	bfa_sem_timer_start(ioc);
}
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME 10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	/* Clear the init-enable/done bits now that init has completed. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
  959. static void
  960. bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
  961. {
  962. u32 pss_ctl;
  963. /*
  964. * Take processor out of reset.
  965. */
  966. pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
  967. pss_ctl &= ~__PSS_LPU0_RESET;
  968. writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
  969. }
  970. static void
  971. bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
  972. {
  973. u32 pss_ctl;
  974. /*
  975. * Put processors in reset.
  976. */
  977. pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
  978. pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
  979. writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
  980. }
/*
 * Get driver and firmware versions.
 *
 * Reads the firmware image header out of shared memory (SMEM page 0,
 * offset 0) word by word into *fwhdr.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	/* Select the SMEM page, then copy the header a word at a time. */
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
  1001. /*
  1002. * Returns TRUE if same.
  1003. */
  1004. bfa_boolean_t
  1005. bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
  1006. {
  1007. struct bfi_ioc_image_hdr_s *drv_fwhdr;
  1008. int i;
  1009. drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
  1010. bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
  1011. for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
  1012. if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
  1013. bfa_trc(ioc, i);
  1014. bfa_trc(ioc, fwhdr->md5sum[i]);
  1015. bfa_trc(ioc, drv_fwhdr->md5sum[i]);
  1016. return BFA_FALSE;
  1017. }
  1018. }
  1019. bfa_trc(ioc, fwhdr->md5sum[0]);
  1020. return BFA_TRUE;
  1021. }
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/*
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_bios_optrom(ioc))
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	/* Signatures must match between running f/w and driver image. */
	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	/* Boot environment recorded in the header must match as well. */
	if (swab32(fwhdr.param) != boot_env) {
		bfa_trc(ioc, fwhdr.param);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	/* Finally, the MD5 checksums must agree. */
	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
  1050. /*
  1051. * Conditionally flush any pending message from firmware at start.
  1052. */
  1053. static void
  1054. bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
  1055. {
  1056. u32 r32;
  1057. r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
  1058. if (r32)
  1059. writel(1, ioc->ioc_regs.lpu_mbox_cmd);
  1060. }
/*
 * Initialize IOC hardware. Decides, based on the current firmware state,
 * whether to (re)boot firmware, wait for another function's in-progress
 * init, or simply re-attach to already-running firmware.
 *
 * @param[in] ioc	IOC instance
 * @param[in] force	treat firmware as uninitialized and force a boot
 */
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/* A forced init pretends firmware was never loaded. */
	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_BOOT_TYPE_NORMAL;
	boot_env = BFI_BOOT_LOADER_OS;

	/*
	 * Flash based firmware boot BIOS env.
	 */
	if (bfa_ioc_is_bios_optrom(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_BIOS;
	}

	/*
	 * Flash based firmware boot UEFI env.
	 */
	if (bfa_ioc_is_uefi(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_UEFI;
	}

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
}
  1129. static void
  1130. bfa_ioc_timeout(void *ioc_arg)
  1131. {
  1132. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  1133. bfa_trc(ioc, 0);
  1134. bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
  1135. }
/*
 * Write a message into the host-to-firmware mailbox registers and ring the
 * doorbell. The message is padded with zeros up to BFI_IOC_MSGLEN_MAX.
 *
 * @param[in] ioc		IOC instance
 * @param[in] ioc_msg	message to send (max BFI_IOC_MSGLEN_MAX bytes)
 * @param[in] len		message length in bytes
 */
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/* Zero out the remainder of the mailbox. */
	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);

	/* Read back to flush the posted write to the device. */
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
  1158. static void
  1159. bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
  1160. {
  1161. struct bfi_ioc_ctrl_req_s enable_req;
  1162. struct timeval tv;
  1163. bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
  1164. bfa_ioc_portid(ioc));
  1165. enable_req.ioc_class = ioc->ioc_mc;
  1166. do_gettimeofday(&tv);
  1167. enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
  1168. bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
  1169. }
  1170. static void
  1171. bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
  1172. {
  1173. struct bfi_ioc_ctrl_req_s disable_req;
  1174. bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
  1175. bfa_ioc_portid(ioc));
  1176. bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
  1177. }
  1178. static void
  1179. bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
  1180. {
  1181. struct bfi_ioc_getattr_req_s attr_req;
  1182. bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
  1183. bfa_ioc_portid(ioc));
  1184. bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
  1185. bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
  1186. }
  1187. static void
  1188. bfa_ioc_hb_check(void *cbarg)
  1189. {
  1190. struct bfa_ioc_s *ioc = cbarg;
  1191. u32 hb_count;
  1192. hb_count = readl(ioc->ioc_regs.heartbeat);
  1193. if (ioc->hb_count == hb_count) {
  1194. printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
  1195. bfa_ioc_recover(ioc);
  1196. return;
  1197. } else {
  1198. ioc->hb_count = hb_count;
  1199. }
  1200. bfa_ioc_mbox_poll(ioc);
  1201. bfa_hb_timer_start(ioc);
  1202. }
  1203. static void
  1204. bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
  1205. {
  1206. ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
  1207. bfa_hb_timer_start(ioc);
  1208. }
/*
 * Initiate a full firmware download.
 *
 * Copies the firmware image chunk by chunk into shared memory through the
 * host page window, then records the boot type and boot environment at
 * their fixed SMEM offsets for the loader to pick up.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		/* Fetch the next image chunk when we cross a chunk boundary. */
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page window to page 0. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
		      swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
		      swab32(boot_env));
}
/*
 * Update BFA configuration from firmware configuration.
 *
 * Byte-swaps the multi-byte attribute fields in place (firmware sends them
 * big-endian), then notifies the IOC state machine.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s *attr = ioc->attr;

	attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
	attr->card_type = be32_to_cpu(attr->card_type);
	attr->maxfrsize = be16_to_cpu(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
  1273. /*
  1274. * Attach time initialization of mbox logic.
  1275. */
  1276. static void
  1277. bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
  1278. {
  1279. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1280. int mc;
  1281. INIT_LIST_HEAD(&mod->cmd_q);
  1282. for (mc = 0; mc < BFI_MC_MAX; mc++) {
  1283. mod->mbhdlr[mc].cbfn = NULL;
  1284. mod->mbhdlr[mc].cbarg = ioc->bfa;
  1285. }
  1286. }
/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;
	u32 stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
  1313. /*
  1314. * Cleanup any pending requests.
  1315. */
  1316. static void
  1317. bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
  1318. {
  1319. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1320. struct bfa_mbox_cmd_s *cmd;
  1321. while (!list_empty(&mod->cmd_q))
  1322. bfa_q_deq(&mod->cmd_q, &cmd);
  1323. }
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 *
 * Returns BFA_STATUS_OK on success, BFA_STATUS_FAILED if the init
 * semaphore could not be acquired.
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		/* SMEM holds big-endian words; convert to host order. */
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page window to page 0. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 *
 * Returns BFA_STATUS_OK on success, BFA_STATUS_FAILED if the init
 * semaphore could not be acquired.
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page window to page 0. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * Notify the driver and all registered listeners of an IOC heartbeat
 * failure, save the firmware trace, and log the event.
 */
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}

	/* Capture the firmware trace for post-mortem debugging. */
	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
}
/*
 * Handle a firmware/driver version mismatch: complete the enable request
 * with a failure status and log a warning.
 */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
}
/*
 * Initialize the chip PLL under the init semaphore.
 *
 * NOTE(review): the return value of bfa_ioc_sem_get() is ignored here, so
 * PLL init proceeds even if the semaphore spin times out -- confirm this
 * best-effort behavior is intended. Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
	bfa_ioc_pll_init_asic(ioc);
	ioc->pllinit = BFA_TRUE;
	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	/* Flush stale mailbox messages, then download and start firmware. */
	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);

	/*
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
/*
 * Enable/disable IOC failure auto recovery.
 *
 * Sets the module-wide default consulted when IOCs are (re)initialized.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}
/*
 * Return whether the IOC state machine is in the operational state.
 */
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
  1512. bfa_boolean_t
  1513. bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
  1514. {
  1515. u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
  1516. return ((r32 != BFI_IOC_UNINIT) &&
  1517. (r32 != BFI_IOC_INITING) &&
  1518. (r32 != BFI_IOC_MEMTEST));
  1519. }
/*
 * Copy an incoming firmware-to-host mailbox message into *mbmsg and
 * acknowledge it by clearing the mailbox status.
 */
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32 *msgp = mbmsg;
	u32 r32;
	int i;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	/* Read back to flush the posted write to the device. */
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}
/*
 * Dispatch an IOC-class firmware message to the IOC / IOCPF state machines.
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u *msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		/* Heartbeats are counted by the timer path, nothing to do. */
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		/* Unknown message id -- trace it and assert. */
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}
/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	/* Start the state machine and drive it to its initial state. */
	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
	/* CNA mode: Catapult ASIC not operating in FC mode. */
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
/*
 * Request IOC enable: re-arm the one-shot firmware-save flag before
 * driving the state machine.
 */
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
/*
 * Request IOC disable via the state machine.
 */
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
  1650. /*
  1651. * Initialize memory for saving firmware trace. Driver must initialize
  1652. * trace memory before call bfa_ioc_enable().
  1653. */
  1654. void
  1655. bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
  1656. {
  1657. ioc->dbg_fwsave = dbg_fwsave;
  1658. ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
  1659. }
  1660. /*
  1661. * Register mailbox message handler functions
  1662. *
  1663. * @param[in] ioc IOC instance
  1664. * @param[in] mcfuncs message class handler functions
  1665. */
  1666. void
  1667. bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
  1668. {
  1669. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1670. int mc;
  1671. for (mc = 0; mc < BFI_MC_MAX; mc++)
  1672. mod->mbhdlr[mc].cbfn = mcfuncs[mc];
  1673. }
  1674. /*
  1675. * Register mailbox message handler function, to be called by common modules
  1676. */
  1677. void
  1678. bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
  1679. bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
  1680. {
  1681. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1682. mod->mbhdlr[mc].cbfn = cbfn;
  1683. mod->mbhdlr[mc].cbarg = cbarg;
  1684. }
  1685. /*
  1686. * Queue a mailbox command request to firmware. Waits if mailbox is busy.
  1687. * Responsibility of caller to serialize
  1688. *
  1689. * @param[in] ioc IOC instance
  1690. * @param[i] cmd Mailbox command
  1691. */
  1692. void
  1693. bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
  1694. {
  1695. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1696. u32 stat;
  1697. /*
  1698. * If a previous command is pending, queue new command
  1699. */
  1700. if (!list_empty(&mod->cmd_q)) {
  1701. list_add_tail(&cmd->qe, &mod->cmd_q);
  1702. return;
  1703. }
  1704. /*
  1705. * If mailbox is busy, queue command for poll timer
  1706. */
  1707. stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
  1708. if (stat) {
  1709. list_add_tail(&cmd->qe, &mod->cmd_q);
  1710. return;
  1711. }
  1712. /*
  1713. * mailbox is free -- queue command to firmware
  1714. */
  1715. bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
  1716. }
  1717. /*
  1718. * Handle mailbox interrupts
  1719. */
  1720. void
  1721. bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
  1722. {
  1723. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1724. struct bfi_mbmsg_s m;
  1725. int mc;
  1726. bfa_ioc_msgget(ioc, &m);
  1727. /*
  1728. * Treat IOC message class as special.
  1729. */
  1730. mc = m.mh.msg_class;
  1731. if (mc == BFI_MC_IOC) {
  1732. bfa_ioc_isr(ioc, &m);
  1733. return;
  1734. }
  1735. if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
  1736. return;
  1737. mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
  1738. }
  1739. void
  1740. bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
  1741. {
  1742. bfa_fsm_send_event(ioc, IOC_E_HWERROR);
  1743. }
  1744. void
  1745. bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
  1746. {
  1747. ioc->fcmode = BFA_TRUE;
  1748. ioc->port_id = bfa_ioc_pcifn(ioc);
  1749. }
  1750. /*
  1751. * return true if IOC is disabled
  1752. */
  1753. bfa_boolean_t
  1754. bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
  1755. {
  1756. return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
  1757. bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
  1758. }
  1759. /*
  1760. * return true if IOC firmware is different.
  1761. */
  1762. bfa_boolean_t
  1763. bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
  1764. {
  1765. return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
  1766. bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
  1767. bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
  1768. }
  1769. #define bfa_ioc_state_disabled(__sm) \
  1770. (((__sm) == BFI_IOC_UNINIT) || \
  1771. ((__sm) == BFI_IOC_INITING) || \
  1772. ((__sm) == BFI_IOC_HWINIT) || \
  1773. ((__sm) == BFI_IOC_DISABLED) || \
  1774. ((__sm) == BFI_IOC_FAIL) || \
  1775. ((__sm) == BFI_IOC_CFG_DISABLED))
  1776. /*
  1777. * Check if adapter is disabled -- both IOCs should be in a disabled
  1778. * state.
  1779. */
  1780. bfa_boolean_t
  1781. bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
  1782. {
  1783. u32 ioc_state;
  1784. void __iomem *rb = ioc->pcidev.pci_bar_kva;
  1785. if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
  1786. return BFA_FALSE;
  1787. ioc_state = readl(rb + BFA_IOC0_STATE_REG);
  1788. if (!bfa_ioc_state_disabled(ioc_state))
  1789. return BFA_FALSE;
  1790. if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
  1791. ioc_state = readl(rb + BFA_IOC1_STATE_REG);
  1792. if (!bfa_ioc_state_disabled(ioc_state))
  1793. return BFA_FALSE;
  1794. }
  1795. return BFA_TRUE;
  1796. }
  1797. #define BFA_MFG_NAME "Brocade"
  1798. void
  1799. bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
  1800. struct bfa_adapter_attr_s *ad_attr)
  1801. {
  1802. struct bfi_ioc_attr_s *ioc_attr;
  1803. ioc_attr = ioc->attr;
  1804. bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
  1805. bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
  1806. bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
  1807. bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
  1808. memcpy(&ad_attr->vpd, &ioc_attr->vpd,
  1809. sizeof(struct bfa_mfg_vpd_s));
  1810. ad_attr->nports = bfa_ioc_get_nports(ioc);
  1811. ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
  1812. bfa_ioc_get_adapter_model(ioc, ad_attr->model);
  1813. /* For now, model descr uses same model string */
  1814. bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
  1815. ad_attr->card_type = ioc_attr->card_type;
  1816. ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
  1817. if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
  1818. ad_attr->prototype = 1;
  1819. else
  1820. ad_attr->prototype = 0;
  1821. ad_attr->pwwn = ioc->attr->pwwn;
  1822. ad_attr->mac = bfa_ioc_get_mac(ioc);
  1823. ad_attr->pcie_gen = ioc_attr->pcie_gen;
  1824. ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
  1825. ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
  1826. ad_attr->asic_rev = ioc_attr->asic_rev;
  1827. bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
  1828. ad_attr->cna_capable = ioc->cna;
  1829. ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
  1830. !ad_attr->is_mezz;
  1831. }
  1832. enum bfa_ioc_type_e
  1833. bfa_ioc_get_type(struct bfa_ioc_s *ioc)
  1834. {
  1835. if (!ioc->ctdev || ioc->fcmode)
  1836. return BFA_IOC_TYPE_FC;
  1837. else if (ioc->ioc_mc == BFI_MC_IOCFC)
  1838. return BFA_IOC_TYPE_FCoE;
  1839. else if (ioc->ioc_mc == BFI_MC_LL)
  1840. return BFA_IOC_TYPE_LL;
  1841. else {
  1842. bfa_assert(ioc->ioc_mc == BFI_MC_LL);
  1843. return BFA_IOC_TYPE_LL;
  1844. }
  1845. }
  1846. void
  1847. bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
  1848. {
  1849. memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
  1850. memcpy((void *)serial_num,
  1851. (void *)ioc->attr->brcd_serialnum,
  1852. BFA_ADAPTER_SERIAL_NUM_LEN);
  1853. }
  1854. void
  1855. bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
  1856. {
  1857. memset((void *)fw_ver, 0, BFA_VERSION_LEN);
  1858. memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
  1859. }
  1860. void
  1861. bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
  1862. {
  1863. bfa_assert(chip_rev);
  1864. memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
  1865. chip_rev[0] = 'R';
  1866. chip_rev[1] = 'e';
  1867. chip_rev[2] = 'v';
  1868. chip_rev[3] = '-';
  1869. chip_rev[4] = ioc->attr->asic_rev;
  1870. chip_rev[5] = '\0';
  1871. }
  1872. void
  1873. bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
  1874. {
  1875. memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
  1876. memcpy(optrom_ver, ioc->attr->optrom_version,
  1877. BFA_VERSION_LEN);
  1878. }
  1879. void
  1880. bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
  1881. {
  1882. memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
  1883. memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
  1884. }
  1885. void
  1886. bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
  1887. {
  1888. struct bfi_ioc_attr_s *ioc_attr;
  1889. bfa_assert(model);
  1890. memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
  1891. ioc_attr = ioc->attr;
  1892. /*
  1893. * model name
  1894. */
  1895. snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
  1896. BFA_MFG_NAME, ioc_attr->card_type);
  1897. }
/*
 * Return the externally visible IOC state, refining certain coarse IOC
 * FSM states with the finer-grained IOCPF sub-state.
 */
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	/*
	 * While enabling or failed, the IOCPF state machine tells the
	 * real story; map selected IOCPF states onto the reported state.
	 */
	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			/* Other IOCPF states: keep the coarse IOC state. */
			break;
		}
	}

	return ioc_st;
}
  1928. void
  1929. bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
  1930. {
  1931. memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
  1932. ioc_attr->state = bfa_ioc_get_state(ioc);
  1933. ioc_attr->port_id = ioc->port_id;
  1934. ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
  1935. bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
  1936. ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
  1937. ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
  1938. bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
  1939. }
  1940. mac_t
  1941. bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
  1942. {
  1943. /*
  1944. * Check the IOC type and return the appropriate MAC
  1945. */
  1946. if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
  1947. return ioc->attr->fcoe_mac;
  1948. else
  1949. return ioc->attr->mac;
  1950. }
  1951. mac_t
  1952. bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
  1953. {
  1954. mac_t m;
  1955. m = ioc->attr->mfg_mac;
  1956. if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
  1957. m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
  1958. else
  1959. bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
  1960. bfa_ioc_pcifn(ioc));
  1961. return m;
  1962. }
  1963. bfa_boolean_t
  1964. bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
  1965. {
  1966. return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
  1967. }
  1968. /*
  1969. * Retrieve saved firmware trace from a prior IOC failure.
  1970. */
  1971. bfa_status_t
  1972. bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
  1973. {
  1974. int tlen;
  1975. if (ioc->dbg_fwsave_len == 0)
  1976. return BFA_STATUS_ENOFSAVE;
  1977. tlen = *trclen;
  1978. if (tlen > ioc->dbg_fwsave_len)
  1979. tlen = ioc->dbg_fwsave_len;
  1980. memcpy(trcdata, ioc->dbg_fwsave, tlen);
  1981. *trclen = tlen;
  1982. return BFA_STATUS_OK;
  1983. }
  1984. /*
  1985. * Retrieve saved firmware trace from a prior IOC failure.
  1986. */
  1987. bfa_status_t
  1988. bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
  1989. {
  1990. u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
  1991. int tlen;
  1992. bfa_status_t status;
  1993. bfa_trc(ioc, *trclen);
  1994. tlen = *trclen;
  1995. if (tlen > BFA_DBG_FWTRC_LEN)
  1996. tlen = BFA_DBG_FWTRC_LEN;
  1997. status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
  1998. *trclen = tlen;
  1999. return status;
  2000. }
  2001. static void
  2002. bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
  2003. {
  2004. struct bfa_mbox_cmd_s cmd;
  2005. struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
  2006. bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
  2007. bfa_ioc_portid(ioc));
  2008. req->ioc_class = ioc->ioc_mc;
  2009. bfa_ioc_mbox_queue(ioc, &cmd);
  2010. }
  2011. static void
  2012. bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
  2013. {
  2014. u32 fwsync_iter = 1000;
  2015. bfa_ioc_send_fwsync(ioc);
  2016. /*
  2017. * After sending a fw sync mbox command wait for it to
  2018. * take effect. We will not wait for a response because
  2019. * 1. fw_sync mbox cmd doesn't have a response.
  2020. * 2. Even if we implement that, interrupts might not
  2021. * be enabled when we call this function.
  2022. * So, just keep checking if any mbox cmd is pending, and
  2023. * after waiting for a reasonable amount of time, go ahead.
  2024. * It is possible that fw has crashed and the mbox command
  2025. * is never acknowledged.
  2026. */
  2027. while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
  2028. fwsync_iter--;
  2029. }
/*
 * Dump firmware smem in chunks. *offset is a cursor maintained across
 * calls; *buflen is the requested chunk size in and the bytes read out.
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
		     u32 *offset, int *buflen)
{
	u32 loff;
	int dlen;
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	/* Reject reads starting at or beyond the end of smem. */
	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * First smem read, sync smem before proceeding
	 * No need to sync before reading every chunk.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	/* Clamp the chunk so it does not run past the end of smem. */
	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	/* Advance the cursor; wrap to 0 once the whole smem is dumped. */
	*offset += dlen;
	if (*offset >= smem_len)
		*offset = 0;
	*buflen = dlen;

	return status;
}
  2066. /*
  2067. * Firmware statistics
  2068. */
  2069. bfa_status_t
  2070. bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
  2071. {
  2072. u32 loff = BFI_IOC_FWSTATS_OFF + \
  2073. BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
  2074. int tlen;
  2075. bfa_status_t status;
  2076. if (ioc->stats_busy) {
  2077. bfa_trc(ioc, ioc->stats_busy);
  2078. return BFA_STATUS_DEVBUSY;
  2079. }
  2080. ioc->stats_busy = BFA_TRUE;
  2081. tlen = sizeof(struct bfa_fw_stats_s);
  2082. status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
  2083. ioc->stats_busy = BFA_FALSE;
  2084. return status;
  2085. }
  2086. bfa_status_t
  2087. bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
  2088. {
  2089. u32 loff = BFI_IOC_FWSTATS_OFF + \
  2090. BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
  2091. int tlen;
  2092. bfa_status_t status;
  2093. if (ioc->stats_busy) {
  2094. bfa_trc(ioc, ioc->stats_busy);
  2095. return BFA_STATUS_DEVBUSY;
  2096. }
  2097. ioc->stats_busy = BFA_TRUE;
  2098. tlen = sizeof(struct bfa_fw_stats_s);
  2099. status = bfa_ioc_smem_clr(ioc, loff, tlen);
  2100. ioc->stats_busy = BFA_FALSE;
  2101. return status;
  2102. }
  2103. /*
  2104. * Save firmware trace if configured.
  2105. */
  2106. static void
  2107. bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
  2108. {
  2109. int tlen;
  2110. if (ioc->dbg_fwsave_once) {
  2111. ioc->dbg_fwsave_once = BFA_FALSE;
  2112. if (ioc->dbg_fwsave_len) {
  2113. tlen = ioc->dbg_fwsave_len;
  2114. bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
  2115. }
  2116. }
  2117. }
  2118. /*
  2119. * Firmware failure detected. Start recovery actions.
  2120. */
  2121. static void
  2122. bfa_ioc_recover(struct bfa_ioc_s *ioc)
  2123. {
  2124. bfa_ioc_stats(ioc, ioc_hbfails);
  2125. bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
  2126. }
/*
 * Check attribute WWNs. Currently only bails out for LL IOCs; no
 * actual checks are implemented beyond the early return.
 */
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
  2133. /*
  2134. * BFA IOC PF private functions
  2135. */
  2136. static void
  2137. bfa_iocpf_timeout(void *ioc_arg)
  2138. {
  2139. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  2140. bfa_trc(ioc, 0);
  2141. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
  2142. }
  2143. static void
  2144. bfa_iocpf_sem_timeout(void *ioc_arg)
  2145. {
  2146. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  2147. bfa_ioc_hw_sem_get(ioc);
  2148. }
  2149. /*
  2150. * bfa timer function
  2151. */
  2152. void
  2153. bfa_timer_beat(struct bfa_timer_mod_s *mod)
  2154. {
  2155. struct list_head *qh = &mod->timer_q;
  2156. struct list_head *qe, *qe_next;
  2157. struct bfa_timer_s *elem;
  2158. struct list_head timedout_q;
  2159. INIT_LIST_HEAD(&timedout_q);
  2160. qe = bfa_q_next(qh);
  2161. while (qe != qh) {
  2162. qe_next = bfa_q_next(qe);
  2163. elem = (struct bfa_timer_s *) qe;
  2164. if (elem->timeout <= BFA_TIMER_FREQ) {
  2165. elem->timeout = 0;
  2166. list_del(&elem->qe);
  2167. list_add_tail(&elem->qe, &timedout_q);
  2168. } else {
  2169. elem->timeout -= BFA_TIMER_FREQ;
  2170. }
  2171. qe = qe_next; /* go to next elem */
  2172. }
  2173. /*
  2174. * Pop all the timeout entries
  2175. */
  2176. while (!list_empty(&timedout_q)) {
  2177. bfa_q_deq(&timedout_q, &elem);
  2178. elem->timercb(elem->arg);
  2179. }
  2180. }
  2181. /*
  2182. * Should be called with lock protection
  2183. */
  2184. void
  2185. bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
  2186. void (*timercb) (void *), void *arg, unsigned int timeout)
  2187. {
  2188. bfa_assert(timercb != NULL);
  2189. bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
  2190. timer->timeout = timeout;
  2191. timer->timercb = timercb;
  2192. timer->arg = arg;
  2193. list_add_tail(&timer->qe, &mod->timer_q);
  2194. }
  2195. /*
  2196. * Should be called with lock protection
  2197. */
  2198. void
  2199. bfa_timer_stop(struct bfa_timer_s *timer)
  2200. {
  2201. bfa_assert(!list_empty(&timer->qe));
  2202. list_del(&timer->qe);
  2203. }