/* bfa_ioc.c */
  1. /*
  2. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include "bfa_ioc.h"
  18. #include "bfi_ctreg.h"
  19. #include "bfa_defs.h"
  20. #include "bfa_defs_svc.h"
  21. #include "bfad_drv.h"
  22. BFA_TRC_FILE(CNA, IOC);
/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	2
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

/* Arm/cancel the per-IOC state machine timer (BFA_IOC_TOV granularity). */
#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

/* Arm/cancel the heartbeat monitor timer. */
#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
/* Byte size of one firmware trace buffer: entries plus trc module header. */
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	 (sizeof(struct bfa_trc_mod_s) -			\
	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */
#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))

#ifdef BFA_IOC_IS_UEFI
#define bfa_ioc_is_bios_optrom(__ioc)	(0)
#define bfa_ioc_is_uefi(__ioc)		BFA_IOC_IS_UEFI
#else
/* An image smaller than BFA_IOC_FWIMG_MINSZ is treated as a BIOS optrom. */
#define bfa_ioc_is_bios_optrom(__ioc)	\
	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
#define bfa_ioc_is_uefi(__ioc)		(0)
#endif

/* True if a mailbox command is queued locally or pending in the hw mailbox. */
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

/* Module-wide knob: attempt automatic IOC recovery after a failure. */
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
  68. /*
  69. * forward declarations
  70. */
  71. static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
  72. static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
  73. static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
  74. static void bfa_ioc_timeout(void *ioc);
  75. static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
  76. static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
  77. static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
  78. static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
  79. static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
  80. static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
  81. static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
  82. static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
  83. static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
  84. static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
  85. static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
  86. static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
  87. static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc);
  88. static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
  89. static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
  90. static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
  91. /*
  92. * hal_ioc_sm
  93. */
/*
 * IOC state machine events, posted to the bfa_ioc_sm_* handlers.
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_FAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
};
  110. bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
  111. bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
  112. bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
  113. bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
  114. bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
  115. bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
  116. bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
  117. bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
  118. bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
/* Maps each IOC sm handler to its externally visible bfa_ioc_state value. */
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/*
 * IOCPF state machine definitions/declarations
 */

/* Arm/cancel the IOCPF timer (shares ioc_timer with the IOC sm). */
#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

/* Same timer, but with the shorter recovery timeout. */
#define bfa_iocpf_recovery_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)

/* Poll timer used while waiting for the h/w semaphore. */
#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
  144. /*
  145. * Forward declareations for iocpf state machine
  146. */
  147. static void bfa_iocpf_enable(struct bfa_ioc_s *ioc);
  148. static void bfa_iocpf_disable(struct bfa_ioc_s *ioc);
  149. static void bfa_iocpf_fail(struct bfa_ioc_s *ioc);
  150. static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc);
  151. static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc);
  152. static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
  153. static void bfa_iocpf_timeout(void *ioc_arg);
  154. static void bfa_iocpf_sem_timeout(void *ioc_arg);
/*
 * IOCPF state machine events, posted to the bfa_iocpf_sm_* handlers.
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
};
/*
 * IOCPF states as reported outside this module (see iocpf_sm_table).
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized	*/
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized		*/
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed			*/
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed			*/
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};
  185. bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
  186. bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
  187. bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
  188. bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
  189. bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
  190. bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
  191. bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
  192. bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
  193. bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
  194. bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
  195. bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
/*
 * Maps IOCPF sm handlers to external bfa_iocpf_state values. Note that
 * fwcheck and mismatch both report FWMISMATCH, and enabling reports HWINIT.
 */
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
  209. /*
  210. * IOC State Machine
  211. */
/*
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
	/* No entry action; wait for IOC_E_RESET. */
}
  219. /*
  220. * IOC is in uninit state.
  221. */
  222. static void
  223. bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
  224. {
  225. bfa_trc(ioc, event);
  226. switch (event) {
  227. case IOC_E_RESET:
  228. bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
  229. break;
  230. default:
  231. bfa_sm_fault(ioc, event);
  232. }
  233. }
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	/* Park the companion IOCPF state machine in reset as well. */
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
  242. /*
  243. * IOC is in reset state.
  244. */
  245. static void
  246. bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
  247. {
  248. bfa_trc(ioc, event);
  249. switch (event) {
  250. case IOC_E_ENABLE:
  251. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  252. break;
  253. case IOC_E_DISABLE:
  254. bfa_ioc_disable_comp(ioc);
  255. break;
  256. case IOC_E_DETACH:
  257. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  258. break;
  259. default:
  260. bfa_sm_fault(ioc, event);
  261. }
  262. }
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	/* Kick the IOCPF state machine to bring up the function. */
	bfa_iocpf_enable(ioc);
}
  268. /*
  269. * Host IOC function is being enabled, awaiting response from firmware.
  270. * Semaphore is acquired.
  271. */
  272. static void
  273. bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
  274. {
  275. bfa_trc(ioc, event);
  276. switch (event) {
  277. case IOC_E_ENABLED:
  278. bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
  279. break;
  280. case IOC_E_FAILED:
  281. bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
  282. break;
  283. case IOC_E_HWERROR:
  284. bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
  285. bfa_iocpf_initfail(ioc);
  286. break;
  287. case IOC_E_DISABLE:
  288. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  289. break;
  290. case IOC_E_DETACH:
  291. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  292. bfa_iocpf_stop(ioc);
  293. break;
  294. case IOC_E_ENABLE:
  295. break;
  296. default:
  297. bfa_sm_fault(ioc, event);
  298. }
  299. }
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	/* Guard the getattr exchange with the IOC timer. */
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}
/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		/* Attributes received: sanity-check WWNs, go operational. */
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_FAILED:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		/* H/w error and timeout are handled alike: init failed. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		/* Already enabling -- ignore. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/* Report successful enable to the driver and start heartbeats. */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n");
}
/*
 * IOC is operational; heartbeat monitor is running.
 */
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* Already enabled -- ignore. */
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_FAILED:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */

	case IOC_E_HBFAIL:
		/* Heartbeat or h/w failure: fail the IOC, notify IOCPF. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/* Forward the disable to the IOCPF and log it. */
	bfa_iocpf_disable(ioc);
	BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
}
  381. /*
  382. * IOC is being disabled
  383. */
  384. static void
  385. bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
  386. {
  387. bfa_trc(ioc, event);
  388. switch (event) {
  389. case IOC_E_DISABLED:
  390. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
  391. break;
  392. case IOC_E_HWERROR:
  393. /*
  394. * No state change. Will move to disabled state
  395. * after iocpf sm completes failure processing and
  396. * moves to disabled state.
  397. */
  398. bfa_iocpf_fail(ioc);
  399. break;
  400. default:
  401. bfa_sm_fault(ioc, event);
  402. }
  403. }
/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	/* Run driver/module disable-completion callbacks. */
	bfa_ioc_disable_comp(ioc);
}
  412. static void
  413. bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
  414. {
  415. bfa_trc(ioc, event);
  416. switch (event) {
  417. case IOC_E_ENABLE:
  418. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  419. break;
  420. case IOC_E_DISABLE:
  421. ioc->cbfn->disable_cbfn(ioc->bfa);
  422. break;
  423. case IOC_E_DETACH:
  424. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  425. bfa_iocpf_stop(ioc);
  426. break;
  427. default:
  428. bfa_sm_fault(ioc, event);
  429. }
  430. }
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	/* Report the enable failure to the driver. */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
  436. /*
  437. * Hardware initialization failed.
  438. */
  439. static void
  440. bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
  441. {
  442. bfa_trc(ioc, event);
  443. switch (event) {
  444. case IOC_E_ENABLED:
  445. bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
  446. break;
  447. case IOC_E_FAILED:
  448. /*
  449. * Initialization failure during iocpf init retry.
  450. */
  451. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  452. break;
  453. case IOC_E_DISABLE:
  454. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  455. break;
  456. case IOC_E_DETACH:
  457. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  458. bfa_iocpf_stop(ioc);
  459. break;
  460. default:
  461. bfa_sm_fault(ioc, event);
  462. }
  463. }
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		/*
		 * NOTE(review): the direct cast assumes the list link is the
		 * first member of bfa_ioc_hbfail_notify_s -- confirm against
		 * its definition in bfa_ioc.h.
		 */
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}

	BFA_LOG(KERN_CRIT, bfad, log_level,
		"Heart Beat of IOC has failed\n");
}
/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FAILED:
		/*
		 * Initialization failure during iocpf recovery.
		 * !!! Fall through !!!
		 */
	case IOC_E_ENABLE:
		/* Enable while failed is reported as an IOC failure. */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_ENABLED:
		/* Recovery succeeded; resume with the attribute query. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  512. /*
  513. * IOCPF State Machine
  514. */
  515. /*
  516. * Reset entry actions -- initialize state machine
  517. */
  518. static void
  519. bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
  520. {
  521. iocpf->retry_count = 0;
  522. iocpf->auto_recover = bfa_auto_recover;
  523. }
  524. /*
  525. * Beginning state. IOC is in reset state.
  526. */
  527. static void
  528. bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  529. {
  530. struct bfa_ioc_s *ioc = iocpf->ioc;
  531. bfa_trc(ioc, event);
  532. switch (event) {
  533. case IOCPF_E_ENABLE:
  534. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
  535. break;
  536. case IOCPF_E_STOP:
  537. break;
  538. default:
  539. bfa_sm_fault(ioc, event);
  540. }
  541. }
/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	/* Request the h/w semaphore; IOCPF_E_SEMLOCKED arrives once held. */
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
  550. /*
  551. * Awaiting h/w semaphore to continue with version check.
  552. */
  553. static void
  554. bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  555. {
  556. struct bfa_ioc_s *ioc = iocpf->ioc;
  557. bfa_trc(ioc, event);
  558. switch (event) {
  559. case IOCPF_E_SEMLOCKED:
  560. if (bfa_ioc_firmware_lock(ioc)) {
  561. iocpf->retry_count = 0;
  562. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
  563. } else {
  564. bfa_ioc_hw_sem_release(ioc);
  565. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
  566. }
  567. break;
  568. case IOCPF_E_DISABLE:
  569. bfa_ioc_hw_sem_get_cancel(ioc);
  570. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  571. bfa_ioc_pf_disabled(ioc);
  572. break;
  573. case IOCPF_E_STOP:
  574. bfa_ioc_hw_sem_get_cancel(ioc);
  575. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  576. break;
  577. default:
  578. bfa_sm_fault(ioc, event);
  579. }
  580. }
/*
 * Entered on firmware version mismatch: notify the driver (first entry
 * only) and start a timer to retry the version check.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->retry_count++;
	bfa_iocpf_timer_start(iocpf->ioc);
}
/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		/* Re-check the firmware version periodically. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  620. /*
  621. * Request for semaphore.
  622. */
  623. static void
  624. bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
  625. {
  626. bfa_ioc_hw_sem_get(iocpf->ioc);
  627. }
/*
 * Awaiting semaphore for h/w initialzation.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		/* Semaphore granted: begin h/w init with a clean retry count. */
		iocpf->retry_count = 0;
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  649. static void
  650. bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
  651. {
  652. bfa_iocpf_timer_start(iocpf->ioc);
  653. bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
  654. }
/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_INITFAIL:
		/* Stop the timer here; TIMEOUT arrives with it already fired. */
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		/* Retry h/w init up to BFA_IOC_HWINIT_MAX times. */
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_iocpf_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		/* Retries exhausted: give up the semaphore and fail. */
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		/* INITFAIL has already been reported; only TIMEOUT escalates. */
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  695. static void
  696. bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
  697. {
  698. bfa_iocpf_timer_start(iocpf->ioc);
  699. bfa_ioc_send_enable(iocpf->ioc);
  700. }
/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		/* Enable acknowledged: release semaphore and go operational. */
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			/* Force firmware back to UNINIT and redo h/w init. */
			writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_FWREADY:
		/* Firmware restarted underneath us: re-issue the enable. */
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  745. static void
  746. bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
  747. {
  748. bfa_ioc_pf_enabled(iocpf->ioc);
  749. }
/*
 * IOCPF is operational; route failures and disables to their states.
 */
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		break;

	case IOCPF_E_FWREADY:
		/*
		 * Unexpected firmware-ready while running: treat as a
		 * failure; hard-fail if the IOC was fully operational.
		 */
		if (bfa_ioc_is_operational(ioc))
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		else
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		bfa_ioc_pf_failed(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  776. static void
  777. bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
  778. {
  779. bfa_iocpf_timer_start(iocpf->ioc);
  780. bfa_ioc_send_disable(iocpf->ioc);
  781. }
/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		/* Firmware acknowledged (or restarted): disable complete. */
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		/* No response: force the fwstate to FAIL and move on. */
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		/* Stale enable reply crossing the disable request; ignore. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  811. /*
  812. * IOC disable completion entry.
  813. */
  814. static void
  815. bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
  816. {
  817. bfa_ioc_pf_disabled(iocpf->ioc);
  818. }
/*
 * IOCPF is disabled; awaiting re-enable or final stop.
 */
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		/* Final stop: release the firmware lock taken at fwcheck. */
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  836. static void
  837. bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
  838. {
  839. bfa_iocpf_timer_start(iocpf->ioc);
  840. }
/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_TIMEOUT:
		/* Retry timer fired: attempt initialization again. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Entry action on IOC failure: quiesce the hardware, publish the
 * failure, and optionally schedule auto-recovery.  The ordering below
 * (stop LPU, mark FAIL, notify peers, flush mbox) is deliberate.
 */
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);
	writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);

	/*
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	/* Kick off recovery only when auto-recovery is enabled. */
	if (iocpf->auto_recover)
		bfa_iocpf_recovery_timer_start(iocpf->ioc);
}
/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		/* Recovery timer only runs when auto_recover is set. */
		if (iocpf->auto_recover)
			bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_TIMEOUT:
		/* Auto-recovery timer fired: re-attempt initialization. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  906. /*
  907. * hal_ioc_pvt BFA IOC private functions
  908. */
  909. static void
  910. bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
  911. {
  912. struct list_head *qe;
  913. struct bfa_ioc_hbfail_notify_s *notify;
  914. ioc->cbfn->disable_cbfn(ioc->bfa);
  915. /*
  916. * Notify common modules registered for notification.
  917. */
  918. list_for_each(qe, &ioc->hb_notify_q) {
  919. notify = (struct bfa_ioc_hbfail_notify_s *) qe;
  920. notify->cbfn(notify->cbarg);
  921. }
  922. }
/*
 * Busy-wait acquire of a chip semaphore register.
 * Reading 0 means the semaphore was granted; non-zero means held.
 * Returns BFA_TRUE on success, BFA_FALSE after ~6ms of spinning.
 */
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	/*
	 * NOTE: on timeout cnt == BFA_SEM_SPINCNT, so this assert always
	 * fires — it deliberately flags semaphore starvation in debug
	 * builds before returning failure.
	 */
	bfa_assert(cnt < BFA_SEM_SPINCNT);
	return BFA_FALSE;
}
/*
 * Release a chip semaphore: writing 1 to the register frees it.
 */
void
bfa_ioc_sem_release(void __iomem *sem_reg)
{
	writel(1, sem_reg);
}
/*
 * Non-blocking attempt to acquire the IOC h/w semaphore.  On success
 * the SEMLOCKED event is posted immediately; otherwise the semaphore
 * timer is started to retry later.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}
/*
 * Release the IOC h/w semaphore (write 1 frees it).
 */
void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
/*
 * Cancel a pending h/w semaphore acquisition by stopping its retry
 * timer.
 */
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	bfa_sem_timer_stop(ioc);
}
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int		i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	/* Clear the init-enable/done bits now that init has completed. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
 * Start the LPU by taking processor 0 out of reset.
 */
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
 * Stop the LPU by putting both processors into reset.
 */
static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
 * Get driver and firmware versions.
 * Reads the firmware image header word-by-word out of shared memory
 * (smem) into *fwhdr.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int		i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	/* Header fits in one page; no page-wrap handling needed here. */
	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
  1046. /*
  1047. * Returns TRUE if same.
  1048. */
  1049. bfa_boolean_t
  1050. bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
  1051. {
  1052. struct bfi_ioc_image_hdr_s *drv_fwhdr;
  1053. int i;
  1054. drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
  1055. bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
  1056. for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
  1057. if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
  1058. bfa_trc(ioc, i);
  1059. bfa_trc(ioc, fwhdr->md5sum[i]);
  1060. bfa_trc(ioc, drv_fwhdr->md5sum[i]);
  1061. return BFA_FALSE;
  1062. }
  1063. }
  1064. bfa_trc(ioc, fwhdr->md5sum[0]);
  1065. return BFA_TRUE;
  1066. }
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/*
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_bios_optrom(ioc))
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	/* Signature must match the driver's embedded image. */
	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	/* Boot environment (stored byte-swapped in smem) must match too. */
	if (swab32(fwhdr.param) != boot_env) {
		bfa_trc(ioc, fwhdr.param);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	/* Finally the MD5 checksums must agree. */
	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
  1095. /*
  1096. * Conditionally flush any pending message from firmware at start.
  1097. */
  1098. static void
  1099. bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
  1100. {
  1101. u32 r32;
  1102. r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
  1103. if (r32)
  1104. writel(1, ioc->ioc_regs.lpu_mbox_cmd);
  1105. }
/*
 * Decide how to bring the IOC up based on the current firmware state:
 * boot fresh firmware, wait for an in-progress init by the other
 * function, or simply re-enable running firmware.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/* A forced init ignores whatever state the firmware reports. */
	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_BOOT_TYPE_NORMAL;
	boot_env = BFI_BOOT_LOADER_OS;

	/*
	 * Flash based firmware boot BIOS env.
	 */
	if (bfa_ioc_is_bios_optrom(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_BIOS;
	}

	/*
	 * Flash based firmware boot UEFI env.
	 */
	if (bfa_ioc_is_uefi(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_UEFI;
	}

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
}
  1174. static void
  1175. bfa_ioc_timeout(void *ioc_arg)
  1176. {
  1177. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  1178. bfa_trc(ioc, 0);
  1179. bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
  1180. }
/*
 * Copy a message into the host-to-firmware mailbox registers (padding
 * the remainder with zeros) and ring the doorbell to notify the LPU.
 */
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/* Zero-fill the unused tail of the mailbox. */
	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	/* Read back to flush the posted write to the device. */
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
/*
 * Build and send the IOC enable request, stamping it with the current
 * time of day.
 */
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct bfa_timeval_s tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_os_gettimeofday(&tv);
	/*
	 * NOTE(review): be32_to_cpu is used as a byte-swap here (it is its
	 * own inverse on little-endian hosts); semantically this looks like
	 * it should be cpu_to_be32 — confirm against the wire format.
	 */
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
  1215. static void
  1216. bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
  1217. {
  1218. struct bfi_ioc_ctrl_req_s disable_req;
  1219. bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
  1220. bfa_ioc_portid(ioc));
  1221. bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
  1222. }
  1223. static void
  1224. bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
  1225. {
  1226. struct bfi_ioc_getattr_req_s attr_req;
  1227. bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
  1228. bfa_ioc_portid(ioc));
  1229. bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
  1230. bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
  1231. }
  1232. static void
  1233. bfa_ioc_hb_check(void *cbarg)
  1234. {
  1235. struct bfa_ioc_s *ioc = cbarg;
  1236. u32 hb_count;
  1237. hb_count = readl(ioc->ioc_regs.heartbeat);
  1238. if (ioc->hb_count == hb_count) {
  1239. printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
  1240. bfa_ioc_recover(ioc);
  1241. return;
  1242. } else {
  1243. ioc->hb_count = hb_count;
  1244. }
  1245. bfa_ioc_mbox_poll(ioc);
  1246. bfa_hb_timer_start(ioc);
  1247. }
/*
 * Begin heartbeat monitoring: snapshot the current counter and arm
 * the heartbeat timer.
 */
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}
/*
 * Stop heartbeat monitoring.
 */
static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
	bfa_hb_timer_stop(ioc);
}
/*
 * Initiate a full firmware download.
 * Copies the driver's embedded firmware image into chip shared memory
 * word-by-word, fetching image chunks and advancing the smem page
 * register as needed, then records the boot type/env for the loader.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		/* Crossed into the next image chunk: fetch it. */
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page register to page 0. */
	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
			swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
			swab32(boot_env));
}
/*
 * Reset the IOC; thin wrapper around bfa_ioc_hwinit().
 */
static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	bfa_ioc_hwinit(ioc, force);
}
/*
 * Update BFA configuration from firmware configuration.
 * Byte-swaps the multi-byte attribute fields in place, then notifies
 * the IOC state machine.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
	attr->card_type     = be32_to_cpu(attr->card_type);
	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
  1327. /*
  1328. * Attach time initialization of mbox logic.
  1329. */
  1330. static void
  1331. bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
  1332. {
  1333. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1334. int mc;
  1335. INIT_LIST_HEAD(&mod->cmd_q);
  1336. for (mc = 0; mc < BFI_MC_MAX; mc++) {
  1337. mod->mbhdlr[mc].cbfn = NULL;
  1338. mod->mbhdlr[mc].cbarg = ioc->bfa;
  1339. }
  1340. }
  1341. /*
  1342. * Mbox poll timer -- restarts any pending mailbox requests.
  1343. */
  1344. static void
  1345. bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
  1346. {
  1347. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1348. struct bfa_mbox_cmd_s *cmd;
  1349. u32 stat;
  1350. /*
  1351. * If no command pending, do nothing
  1352. */
  1353. if (list_empty(&mod->cmd_q))
  1354. return;
  1355. /*
  1356. * If previous command is not yet fetched by firmware, do nothing
  1357. */
  1358. stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
  1359. if (stat)
  1360. return;
  1361. /*
  1362. * Enqueue command to firmware.
  1363. */
  1364. bfa_q_deq(&mod->cmd_q, &cmd);
  1365. bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
  1366. }
  1367. /*
  1368. * Cleanup any pending requests.
  1369. */
  1370. static void
  1371. bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
  1372. {
  1373. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1374. struct bfa_mbox_cmd_s *cmd;
  1375. while (!list_empty(&mod->cmd_q))
  1376. bfa_q_deq(&mod->cmd_q, &cmd);
  1377. }
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 *
 * Returns BFA_STATUS_FAILED if the init semaphore cannot be taken,
 * BFA_STATUS_OK otherwise.  Words are byte-swapped from device order.
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff, r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
	loff = bfa_ioc_smem_pgoff(ioc, soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore page register to page 0 before releasing the semaphore. */
	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
  1428. /*
  1429. * Clear SMEM data from host through PCI memmap
  1430. *
  1431. * @param[in] ioc memory for IOC
  1432. * @param[in] soff smem offset
  1433. * @param[in] sz size of smem in bytes
  1434. */
  1435. static bfa_status_t
  1436. bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
  1437. {
  1438. int i, len;
  1439. u32 pgnum, loff;
  1440. pgnum = bfa_ioc_smem_pgnum(ioc, soff);
  1441. loff = bfa_ioc_smem_pgoff(ioc, soff);
  1442. bfa_trc(ioc, pgnum);
  1443. bfa_trc(ioc, loff);
  1444. bfa_trc(ioc, sz);
  1445. /*
  1446. * Hold semaphore to serialize pll init and fwtrc.
  1447. */
  1448. if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
  1449. bfa_trc(ioc, 0);
  1450. return BFA_STATUS_FAILED;
  1451. }
  1452. writel(pgnum, ioc->ioc_regs.host_page_num_fn);
  1453. len = sz/sizeof(u32); /* len in words */
  1454. bfa_trc(ioc, len);
  1455. for (i = 0; i < len; i++) {
  1456. bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
  1457. loff += sizeof(u32);
  1458. /*
  1459. * handle page offset wrap around
  1460. */
  1461. loff = PSS_SMEM_PGOFF(loff);
  1462. if (loff == 0) {
  1463. pgnum++;
  1464. writel(pgnum, ioc->ioc_regs.host_page_num_fn);
  1465. }
  1466. }
  1467. writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
  1468. /*
  1469. * release semaphore.
  1470. */
  1471. bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
  1472. bfa_trc(ioc, pgnum);
  1473. return BFA_STATUS_OK;
  1474. }
  1475. /*
  1476. * hal iocpf to ioc interface
  1477. */
/*
 * IOCPF -> IOC: the PF completed enable.
 */
static void
bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}
/*
 * IOCPF -> IOC: the PF completed disable.
 */
static void
bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}
/*
 * IOCPF -> IOC: the PF failed.
 */
static void
bfa_ioc_pf_failed(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_FAILED);
}
/*
 * IOCPF -> IOC: firmware/driver version mismatch.  Completes the
 * pending enable with a failure status and logs a warning.
 */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
}
  1505. /*
  1506. * hal_ioc_public
  1507. */
/*
 * Initialize the ASIC PLL while holding the init semaphore so no
 * other function touches the chip.  Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	/* NOTE(review): the bfa_ioc_sem_get() result is ignored here —
	 * on timeout init proceeds without the semaphore; confirm intent. */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;
	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	/* Discard stale firmware messages, then download the image. */
	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);

	/*
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
  1553. /*
  1554. * Enable/disable IOC failure auto recovery.
  1555. */
  1556. void
  1557. bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
  1558. {
  1559. bfa_auto_recover = auto_recover;
  1560. }
/*
 * Return BFA_TRUE if the IOC state machine is in the operational
 * state.
 */
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
  1566. bfa_boolean_t
  1567. bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
  1568. {
  1569. u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
  1570. return ((r32 != BFI_IOC_UNINIT) &&
  1571. (r32 != BFI_IOC_INITING) &&
  1572. (r32 != BFI_IOC_MEMTEST));
  1573. }
/*
 * Read a firmware-to-host message out of the LPU mailbox registers
 * into mbmsg, then acknowledge it to clear the mailbox interrupt.
 */
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32	*msgp = mbmsg;
	u32	r32;
	int		i;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	/* Read back to flush the posted write. */
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}
/*
 * Dispatch an IOC-class firmware message to the IOC/IOCPF state
 * machines based on its message id.
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		/* Heartbeat counter already read elsewhere; nothing to do. */
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}
/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	cbfn	completion callbacks into the driver
 * @param[in]	timer_mod	timer module for IOC timers
 *
 * Leaves the IOC state machine in the reset state.
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->timer_mod	= timer_mod;
	ioc->fcmode	= BFA_FALSE;
	ioc->pllinit	= BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc	= ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	/* Start in uninit and immediately drive to the reset state. */
	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
  1652. /*
  1653. * Setup IOC PCI properties.
  1654. *
  1655. * @param[in] pcidev PCI device information for this IOC
  1656. */
  1657. void
  1658. bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
  1659. enum bfi_mclass mc)
  1660. {
  1661. ioc->ioc_mc = mc;
  1662. ioc->pcidev = *pcidev;
  1663. ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
  1664. ioc->cna = ioc->ctdev && !ioc->fcmode;
  1665. /*
  1666. * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
  1667. */
  1668. if (ioc->ctdev)
  1669. bfa_ioc_set_ct_hwif(ioc);
  1670. else
  1671. bfa_ioc_set_cb_hwif(ioc);
  1672. bfa_ioc_map_port(ioc);
  1673. bfa_ioc_reg_init(ioc);
  1674. }
/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	/* the fw attribute block lives at the start of the claimed region */
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
/*
 * Return size of dma memory required.
 */
u32
bfa_ioc_meminfo(void)
{
	/* fw attribute block rounded up to the DMA alignment boundary */
	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
}
/* Request IOC enable; actual work is done by the IOC state machine. */
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	/* re-arm the one-shot fw trace save for the next failure */
	ioc->dbg_fwsave_once = BFA_TRUE;
	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
/* Request IOC disable; actual work is done by the IOC state machine. */
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
  1712. /*
  1713. * Returns memory required for saving firmware trace in case of crash.
  1714. * Driver must call this interface to allocate memory required for
  1715. * automatic saving of firmware trace. Driver should call
  1716. * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
  1717. * trace memory.
  1718. */
  1719. int
  1720. bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
  1721. {
  1722. return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
  1723. }
/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before call bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	/* length is zero unless auto-recover is configured */
	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover);
}
/* Convert a fw memory address to a shared-memory page number. */
u32
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
/*
 * Convert a fw memory address to an offset within a shared-memory page.
 * NOTE(review): the ioc argument is unused; presumably kept for
 * interface symmetry with bfa_ioc_smem_pgnum().
 */
u32
bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}
  1744. /*
  1745. * Register mailbox message handler functions
  1746. *
  1747. * @param[in] ioc IOC instance
  1748. * @param[in] mcfuncs message class handler functions
  1749. */
  1750. void
  1751. bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
  1752. {
  1753. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1754. int mc;
  1755. for (mc = 0; mc < BFI_MC_MAX; mc++)
  1756. mod->mbhdlr[mc].cbfn = mcfuncs[mc];
  1757. }
  1758. /*
  1759. * Register mailbox message handler function, to be called by common modules
  1760. */
  1761. void
  1762. bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
  1763. bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
  1764. {
  1765. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1766. mod->mbhdlr[mc].cbfn = cbfn;
  1767. mod->mbhdlr[mc].cbarg = cbarg;
  1768. }
  1769. /*
  1770. * Queue a mailbox command request to firmware. Waits if mailbox is busy.
  1771. * Responsibility of caller to serialize
  1772. *
  1773. * @param[in] ioc IOC instance
  1774. * @param[i] cmd Mailbox command
  1775. */
  1776. void
  1777. bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
  1778. {
  1779. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1780. u32 stat;
  1781. /*
  1782. * If a previous command is pending, queue new command
  1783. */
  1784. if (!list_empty(&mod->cmd_q)) {
  1785. list_add_tail(&cmd->qe, &mod->cmd_q);
  1786. return;
  1787. }
  1788. /*
  1789. * If mailbox is busy, queue command for poll timer
  1790. */
  1791. stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
  1792. if (stat) {
  1793. list_add_tail(&cmd->qe, &mod->cmd_q);
  1794. return;
  1795. }
  1796. /*
  1797. * mailbox is free -- queue command to firmware
  1798. */
  1799. bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
  1800. }
  1801. /*
  1802. * Handle mailbox interrupts
  1803. */
  1804. void
  1805. bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
  1806. {
  1807. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1808. struct bfi_mbmsg_s m;
  1809. int mc;
  1810. bfa_ioc_msgget(ioc, &m);
  1811. /*
  1812. * Treat IOC message class as special.
  1813. */
  1814. mc = m.mh.msg_class;
  1815. if (mc == BFI_MC_IOC) {
  1816. bfa_ioc_isr(ioc, &m);
  1817. return;
  1818. }
  1819. if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
  1820. return;
  1821. mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
  1822. }
/* Hardware error interrupt: notify the IOC state machine. */
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/* Put the IOC in FC mode; port id is taken from the PCI function. */
void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}
  1834. /*
  1835. * return true if IOC is disabled
  1836. */
  1837. bfa_boolean_t
  1838. bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
  1839. {
  1840. return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
  1841. bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
  1842. }
  1843. /*
  1844. * return true if IOC firmware is different.
  1845. */
  1846. bfa_boolean_t
  1847. bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
  1848. {
  1849. return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
  1850. bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
  1851. bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
  1852. }
/*
 * Firmware IOC states that are treated as "disabled" when checking
 * whether the whole adapter is disabled (bfa_ioc_adapter_is_disabled).
 */
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
  1860. /*
  1861. * Check if adapter is disabled -- both IOCs should be in a disabled
  1862. * state.
  1863. */
  1864. bfa_boolean_t
  1865. bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
  1866. {
  1867. u32 ioc_state;
  1868. void __iomem *rb = ioc->pcidev.pci_bar_kva;
  1869. if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
  1870. return BFA_FALSE;
  1871. ioc_state = readl(rb + BFA_IOC0_STATE_REG);
  1872. if (!bfa_ioc_state_disabled(ioc_state))
  1873. return BFA_FALSE;
  1874. if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
  1875. ioc_state = readl(rb + BFA_IOC1_STATE_REG);
  1876. if (!bfa_ioc_state_disabled(ioc_state))
  1877. return BFA_FALSE;
  1878. }
  1879. return BFA_TRUE;
  1880. }
/*
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
			struct bfa_ioc_hbfail_notify_s *notify)
{
	/* append to the heartbeat-failure notification list */
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}
  1891. #define BFA_MFG_NAME "Brocade"
  1892. void
  1893. bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
  1894. struct bfa_adapter_attr_s *ad_attr)
  1895. {
  1896. struct bfi_ioc_attr_s *ioc_attr;
  1897. ioc_attr = ioc->attr;
  1898. bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
  1899. bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
  1900. bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
  1901. bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
  1902. memcpy(&ad_attr->vpd, &ioc_attr->vpd,
  1903. sizeof(struct bfa_mfg_vpd_s));
  1904. ad_attr->nports = bfa_ioc_get_nports(ioc);
  1905. ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
  1906. bfa_ioc_get_adapter_model(ioc, ad_attr->model);
  1907. /* For now, model descr uses same model string */
  1908. bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
  1909. ad_attr->card_type = ioc_attr->card_type;
  1910. ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
  1911. if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
  1912. ad_attr->prototype = 1;
  1913. else
  1914. ad_attr->prototype = 0;
  1915. ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
  1916. ad_attr->mac = bfa_ioc_get_mac(ioc);
  1917. ad_attr->pcie_gen = ioc_attr->pcie_gen;
  1918. ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
  1919. ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
  1920. ad_attr->asic_rev = ioc_attr->asic_rev;
  1921. bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
  1922. ad_attr->cna_capable = ioc->cna;
  1923. ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
  1924. }
  1925. enum bfa_ioc_type_e
  1926. bfa_ioc_get_type(struct bfa_ioc_s *ioc)
  1927. {
  1928. if (!ioc->ctdev || ioc->fcmode)
  1929. return BFA_IOC_TYPE_FC;
  1930. else if (ioc->ioc_mc == BFI_MC_IOCFC)
  1931. return BFA_IOC_TYPE_FCoE;
  1932. else if (ioc->ioc_mc == BFI_MC_LL)
  1933. return BFA_IOC_TYPE_LL;
  1934. else {
  1935. bfa_assert(ioc->ioc_mc == BFI_MC_LL);
  1936. return BFA_IOC_TYPE_LL;
  1937. }
  1938. }
  1939. void
  1940. bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
  1941. {
  1942. memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
  1943. memcpy((void *)serial_num,
  1944. (void *)ioc->attr->brcd_serialnum,
  1945. BFA_ADAPTER_SERIAL_NUM_LEN);
  1946. }
  1947. void
  1948. bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
  1949. {
  1950. memset((void *)fw_ver, 0, BFA_VERSION_LEN);
  1951. memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
  1952. }
  1953. void
  1954. bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
  1955. {
  1956. bfa_assert(chip_rev);
  1957. memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
  1958. chip_rev[0] = 'R';
  1959. chip_rev[1] = 'e';
  1960. chip_rev[2] = 'v';
  1961. chip_rev[3] = '-';
  1962. chip_rev[4] = ioc->attr->asic_rev;
  1963. chip_rev[5] = '\0';
  1964. }
  1965. void
  1966. bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
  1967. {
  1968. memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
  1969. memcpy(optrom_ver, ioc->attr->optrom_version,
  1970. BFA_VERSION_LEN);
  1971. }
  1972. void
  1973. bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
  1974. {
  1975. memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
  1976. memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
  1977. }
  1978. void
  1979. bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
  1980. {
  1981. struct bfi_ioc_attr_s *ioc_attr;
  1982. bfa_assert(model);
  1983. memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
  1984. ioc_attr = ioc->attr;
  1985. /*
  1986. * model name
  1987. */
  1988. snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
  1989. BFA_MFG_NAME, ioc_attr->card_type);
  1990. }
  1991. enum bfa_ioc_state
  1992. bfa_ioc_get_state(struct bfa_ioc_s *ioc)
  1993. {
  1994. enum bfa_iocpf_state iocpf_st;
  1995. enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
  1996. if (ioc_st == BFA_IOC_ENABLING ||
  1997. ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
  1998. iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
  1999. switch (iocpf_st) {
  2000. case BFA_IOCPF_SEMWAIT:
  2001. ioc_st = BFA_IOC_SEMWAIT;
  2002. break;
  2003. case BFA_IOCPF_HWINIT:
  2004. ioc_st = BFA_IOC_HWINIT;
  2005. break;
  2006. case BFA_IOCPF_FWMISMATCH:
  2007. ioc_st = BFA_IOC_FWMISMATCH;
  2008. break;
  2009. case BFA_IOCPF_FAIL:
  2010. ioc_st = BFA_IOC_FAIL;
  2011. break;
  2012. case BFA_IOCPF_INITFAIL:
  2013. ioc_st = BFA_IOC_INITFAIL;
  2014. break;
  2015. default:
  2016. break;
  2017. }
  2018. }
  2019. return ioc_st;
  2020. }
/* Fill in a consolidated IOC attribute snapshot for the caller. */
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;
	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	/* PCI identity of this function */
	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
/*
 * hal_wwn_public
 */

/* Port world-wide name from the fw attribute block. */
wwn_t
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->pwwn;
}
/* Node world-wide name from the fw attribute block. */
wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->nwwn;
}
/*
 * Adapter identifier.
 * NOTE(review): returns the manufacturing port WWN as the adapter id --
 * confirm this is intended rather than a dedicated adapter-id field.
 */
u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_pwwn;
}
  2051. mac_t
  2052. bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
  2053. {
  2054. /*
  2055. * Check the IOC type and return the appropriate MAC
  2056. */
  2057. if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
  2058. return ioc->attr->fcoe_mac;
  2059. else
  2060. return ioc->attr->mac;
  2061. }
/* Manufacturing (factory-programmed) port WWN. */
wwn_t
bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_pwwn;
}
/* Manufacturing (factory-programmed) node WWN. */
wwn_t
bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_nwwn;
}
  2072. mac_t
  2073. bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
  2074. {
  2075. mac_t m;
  2076. m = ioc->attr->mfg_mac;
  2077. if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
  2078. m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
  2079. else
  2080. bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
  2081. bfa_ioc_pcifn(ioc));
  2082. return m;
  2083. }
/* True if explicitly in FC mode, or always for non-CT (FC-only) ASICs. */
bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}
  2089. /*
  2090. * Retrieve saved firmware trace from a prior IOC failure.
  2091. */
  2092. bfa_status_t
  2093. bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
  2094. {
  2095. int tlen;
  2096. if (ioc->dbg_fwsave_len == 0)
  2097. return BFA_STATUS_ENOFSAVE;
  2098. tlen = *trclen;
  2099. if (tlen > ioc->dbg_fwsave_len)
  2100. tlen = ioc->dbg_fwsave_len;
  2101. memcpy(trcdata, ioc->dbg_fwsave, tlen);
  2102. *trclen = tlen;
  2103. return BFA_STATUS_OK;
  2104. }
/*
 * Clear saved firmware trace
 */
void
bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
{
	/* re-arm the one-shot save; the next failure will overwrite it */
	ioc->dbg_fwsave_once = BFA_TRUE;
}
  2113. /*
  2114. * Retrieve saved firmware trace from a prior IOC failure.
  2115. */
  2116. bfa_status_t
  2117. bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
  2118. {
  2119. u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
  2120. int tlen;
  2121. bfa_status_t status;
  2122. bfa_trc(ioc, *trclen);
  2123. tlen = *trclen;
  2124. if (tlen > BFA_DBG_FWTRC_LEN)
  2125. tlen = BFA_DBG_FWTRC_LEN;
  2126. status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
  2127. *trclen = tlen;
  2128. return status;
  2129. }
/*
 * Send a debug-sync (fw sync) mailbox command to firmware.
 * NOTE(review): cmd is stack-allocated, and bfa_ioc_mbox_queue() links
 * it onto cmd_q when the mailbox is busy -- looks like callers rely on
 * the queue being empty here; confirm.
 */
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_queue(ioc, &cmd);
}
/* Send a fw sync and busy-wait briefly for it to be consumed. */
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending a fw sync mbox command wait for it to
	 * take effect. We will not wait for a response because
	 *    1. fw_sync mbox cmd doesn't have a response.
	 *    2. Even if we implement that, interrupts might not
	 *       be enabled when we call this function.
	 * So, just keep checking if any mbox cmd is pending, and
	 * after waiting for a reasonable amount of time, go ahead.
	 * It is possible that fw has crashed and the mbox command
	 * is never acknowledged.
	 *
	 * NOTE(review): the loop has no delay, so 1000 iterations
	 * bound very little wall-clock time -- confirm whether a
	 * cpu_relax()/udelay per iteration was intended.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}
  2159. /*
  2160. * Dump firmware smem
  2161. */
  2162. bfa_status_t
  2163. bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
  2164. u32 *offset, int *buflen)
  2165. {
  2166. u32 loff;
  2167. int dlen;
  2168. bfa_status_t status;
  2169. u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
  2170. if (*offset >= smem_len) {
  2171. *offset = *buflen = 0;
  2172. return BFA_STATUS_EINVAL;
  2173. }
  2174. loff = *offset;
  2175. dlen = *buflen;
  2176. /*
  2177. * First smem read, sync smem before proceeding
  2178. * No need to sync before reading every chunk.
  2179. */
  2180. if (loff == 0)
  2181. bfa_ioc_fwsync(ioc);
  2182. if ((loff + dlen) >= smem_len)
  2183. dlen = smem_len - loff;
  2184. status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
  2185. if (status != BFA_STATUS_OK) {
  2186. *offset = *buflen = 0;
  2187. return status;
  2188. }
  2189. *offset += dlen;
  2190. if (*offset >= smem_len)
  2191. *offset = 0;
  2192. *buflen = dlen;
  2193. return status;
  2194. }
  2195. /*
  2196. * Firmware statistics
  2197. */
  2198. bfa_status_t
  2199. bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
  2200. {
  2201. u32 loff = BFI_IOC_FWSTATS_OFF + \
  2202. BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
  2203. int tlen;
  2204. bfa_status_t status;
  2205. if (ioc->stats_busy) {
  2206. bfa_trc(ioc, ioc->stats_busy);
  2207. return BFA_STATUS_DEVBUSY;
  2208. }
  2209. ioc->stats_busy = BFA_TRUE;
  2210. tlen = sizeof(struct bfa_fw_stats_s);
  2211. status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
  2212. ioc->stats_busy = BFA_FALSE;
  2213. return status;
  2214. }
  2215. bfa_status_t
  2216. bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
  2217. {
  2218. u32 loff = BFI_IOC_FWSTATS_OFF + \
  2219. BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
  2220. int tlen;
  2221. bfa_status_t status;
  2222. if (ioc->stats_busy) {
  2223. bfa_trc(ioc, ioc->stats_busy);
  2224. return BFA_STATUS_DEVBUSY;
  2225. }
  2226. ioc->stats_busy = BFA_TRUE;
  2227. tlen = sizeof(struct bfa_fw_stats_s);
  2228. status = bfa_ioc_smem_clr(ioc, loff, tlen);
  2229. ioc->stats_busy = BFA_FALSE;
  2230. return status;
  2231. }
/*
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_len) {
		tlen = ioc->dbg_fwsave_len;
		/* snapshot the fw trace into the pre-claimed save area */
		bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
	}
}
/*
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	/* save the fw trace at most once per failure episode */
	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		bfa_ioc_debug_save(ioc);
	}

	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
/*
 * NOTE(review): effectively a no-op -- returns early for LL IOCs and
 * otherwise does nothing. Looks like a stubbed-out WWN validity check;
 * confirm intent before removing.
 */
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
/*
 * hal_iocpf_pvt BFA IOC PF private functions
 */

/* Forward an enable request to the IOCPF state machine. */
static void
bfa_iocpf_enable(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
/* Forward a disable request to the IOCPF state machine. */
static void
bfa_iocpf_disable(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}
/* Report a failure to the IOCPF state machine. */
static void
bfa_iocpf_fail(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}
/* Report an initialization failure to the IOCPF state machine. */
static void
bfa_iocpf_initfail(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}
/* Report a get-attributes failure to the IOCPF state machine. */
static void
bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}
/* Forward a stop request to the IOCPF state machine. */
static void
bfa_iocpf_stop(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}
/* Timer callback: report a timeout to the IOCPF state machine. */
static void
bfa_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
/* Timer callback: retry acquiring the hardware semaphore. */
static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
/*
 * bfa timer function
 */

/* Initialize the timer module's (empty) active-timer queue. */
void
bfa_timer_init(struct bfa_timer_mod_s *mod)
{
	INIT_LIST_HEAD(&mod->timer_q);
}
  2317. void
  2318. bfa_timer_beat(struct bfa_timer_mod_s *mod)
  2319. {
  2320. struct list_head *qh = &mod->timer_q;
  2321. struct list_head *qe, *qe_next;
  2322. struct bfa_timer_s *elem;
  2323. struct list_head timedout_q;
  2324. INIT_LIST_HEAD(&timedout_q);
  2325. qe = bfa_q_next(qh);
  2326. while (qe != qh) {
  2327. qe_next = bfa_q_next(qe);
  2328. elem = (struct bfa_timer_s *) qe;
  2329. if (elem->timeout <= BFA_TIMER_FREQ) {
  2330. elem->timeout = 0;
  2331. list_del(&elem->qe);
  2332. list_add_tail(&elem->qe, &timedout_q);
  2333. } else {
  2334. elem->timeout -= BFA_TIMER_FREQ;
  2335. }
  2336. qe = qe_next; /* go to next elem */
  2337. }
  2338. /*
  2339. * Pop all the timeout entries
  2340. */
  2341. while (!list_empty(&timedout_q)) {
  2342. bfa_q_deq(&timedout_q, &elem);
  2343. elem->timercb(elem->arg);
  2344. }
  2345. }
  2346. /*
  2347. * Should be called with lock protection
  2348. */
  2349. void
  2350. bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
  2351. void (*timercb) (void *), void *arg, unsigned int timeout)
  2352. {
  2353. bfa_assert(timercb != NULL);
  2354. bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
  2355. timer->timeout = timeout;
  2356. timer->timercb = timercb;
  2357. timer->arg = arg;
  2358. list_add_tail(&timer->qe, &mod->timer_q);
  2359. }
/*
 * Cancel an armed timer and remove it from the active queue.
 * Should be called with lock protection.
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	/* the timer must currently be queued */
	bfa_assert(!list_empty(&timer->qe));

	list_del(&timer->qe);
}