  1. /*
  2. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include "bfad_drv.h"
  18. #include "bfa_ioc.h"
  19. #include "bfi_reg.h"
  20. #include "bfa_defs.h"
  21. #include "bfa_defs_svc.h"
  22. BFA_TRC_FILE(CNA, IOC);
/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

/* Arm the IOC state-machine timer; fires bfa_ioc_timeout after BFA_IOC_TOV. */
#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

/* Arm/cancel the firmware heartbeat timer (bfa_ioc_hb_check callback). */
#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

/* Offset of the firmware trace buffer for PCI function _fn. */
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 * Each macro dispatches through the per-ASIC hardware interface vtable
 * (ioc->ioc_hwif), so the generic IOC code stays chip-agnostic.
 */
#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

/* True when a mailbox command is queued or one is still pending in h/w. */
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
/* Module-wide policy: attempt automatic IOC recovery on failure. */
bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
};

/* Declare entry/handler pairs for every IOC state. */
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

/* Map each state handler to its externally visible bfa_ioc_state value. */
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/*
 * IOCPF state machine definitions/declarations
 */
/* IOCPF shares ioc_timer with the IOC sm; the two never run concurrently. */
#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

/* Periodic poll while waiting for firmware initialization to complete. */
#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

/* Retry timer for acquiring the h/w semaphore. */
#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declareations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);
/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};
/* Declare entry/handler pairs for every IOCPF state. */
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

/* Map each IOCPF handler to its externally visible bfa_iocpf_state value;
 * several internal states share one externally reported state. */
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
  205. /*
  206. * IOC State Machine
  207. */
  208. /*
  209. * Beginning state. IOC uninit state.
  210. */
  211. static void
  212. bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
  213. {
  214. }
  215. /*
  216. * IOC is in uninit state.
  217. */
  218. static void
  219. bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
  220. {
  221. bfa_trc(ioc, event);
  222. switch (event) {
  223. case IOC_E_RESET:
  224. bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
  225. break;
  226. default:
  227. bfa_sm_fault(ioc, event);
  228. }
  229. }
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	/* Put the companion IOCPF state machine back to its reset state. */
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
  238. /*
  239. * IOC is in reset state.
  240. */
  241. static void
  242. bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
  243. {
  244. bfa_trc(ioc, event);
  245. switch (event) {
  246. case IOC_E_ENABLE:
  247. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  248. break;
  249. case IOC_E_DISABLE:
  250. bfa_ioc_disable_comp(ioc);
  251. break;
  252. case IOC_E_DETACH:
  253. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  254. break;
  255. default:
  256. bfa_sm_fault(ioc, event);
  257. }
  258. }
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	/* Kick off IOCPF enable; completion arrives as IOC_E_ENABLED. */
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
  264. /*
  265. * Host IOC function is being enabled, awaiting response from firmware.
  266. * Semaphore is acquired.
  267. */
  268. static void
  269. bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
  270. {
  271. bfa_trc(ioc, event);
  272. switch (event) {
  273. case IOC_E_ENABLED:
  274. bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
  275. break;
  276. case IOC_E_PFFAILED:
  277. /* !!! fall through !!! */
  278. case IOC_E_HWERROR:
  279. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  280. bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
  281. if (event != IOC_E_PFFAILED)
  282. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
  283. break;
  284. case IOC_E_DISABLE:
  285. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  286. break;
  287. case IOC_E_DETACH:
  288. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  289. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  290. break;
  291. case IOC_E_ENABLE:
  292. break;
  293. default:
  294. bfa_sm_fault(ioc, event);
  295. }
  296. }
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	/* Time-bound the get-attribute request to the firmware. */
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}
  303. /*
  304. * IOC configuration in progress. Timer is active.
  305. */
  306. static void
  307. bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
  308. {
  309. bfa_trc(ioc, event);
  310. switch (event) {
  311. case IOC_E_FWRSP_GETATTR:
  312. bfa_ioc_timer_stop(ioc);
  313. bfa_ioc_check_attr_wwns(ioc);
  314. bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
  315. break;
  316. break;
  317. case IOC_E_PFFAILED:
  318. case IOC_E_HWERROR:
  319. bfa_ioc_timer_stop(ioc);
  320. /* !!! fall through !!! */
  321. case IOC_E_TIMEOUT:
  322. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  323. bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
  324. if (event != IOC_E_PFFAILED)
  325. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
  326. break;
  327. case IOC_E_DISABLE:
  328. bfa_ioc_timer_stop(ioc);
  329. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  330. break;
  331. case IOC_E_ENABLE:
  332. break;
  333. default:
  334. bfa_sm_fault(ioc, event);
  335. }
  336. }
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/* Report enable success to the driver, notify listeners, and
	 * start heartbeat monitoring of the firmware. */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
}
  346. static void
  347. bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
  348. {
  349. bfa_trc(ioc, event);
  350. switch (event) {
  351. case IOC_E_ENABLE:
  352. break;
  353. case IOC_E_DISABLE:
  354. bfa_hb_timer_stop(ioc);
  355. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  356. break;
  357. case IOC_E_PFFAILED:
  358. case IOC_E_HWERROR:
  359. bfa_hb_timer_stop(ioc);
  360. /* !!! fall through !!! */
  361. case IOC_E_HBFAIL:
  362. if (ioc->iocpf.auto_recover)
  363. bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
  364. else
  365. bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
  366. bfa_ioc_fail_notify(ioc);
  367. if (event != IOC_E_PFFAILED)
  368. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
  369. break;
  370. default:
  371. bfa_sm_fault(ioc, event);
  372. }
  373. }
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/* Ask the IOCPF to disable; completion arrives as IOC_E_DISABLED. */
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
}
  381. /*
  382. * IOC is being disabled
  383. */
  384. static void
  385. bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
  386. {
  387. bfa_trc(ioc, event);
  388. switch (event) {
  389. case IOC_E_DISABLED:
  390. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
  391. break;
  392. case IOC_E_HWERROR:
  393. /*
  394. * No state change. Will move to disabled state
  395. * after iocpf sm completes failure processing and
  396. * moves to disabled state.
  397. */
  398. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
  399. break;
  400. default:
  401. bfa_sm_fault(ioc, event);
  402. }
  403. }
/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	/* Run the disable-completion callbacks. */
	bfa_ioc_disable_comp(ioc);
}
  412. static void
  413. bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
  414. {
  415. bfa_trc(ioc, event);
  416. switch (event) {
  417. case IOC_E_ENABLE:
  418. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  419. break;
  420. case IOC_E_DISABLE:
  421. ioc->cbfn->disable_cbfn(ioc->bfa);
  422. break;
  423. case IOC_E_DETACH:
  424. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  425. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  426. break;
  427. default:
  428. bfa_sm_fault(ioc, event);
  429. }
  430. }
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	/* Trace-only entry; the retry itself is driven by iocpf events. */
	bfa_trc(ioc, 0);
}
  436. /*
  437. * Hardware initialization retry.
  438. */
  439. static void
  440. bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
  441. {
  442. bfa_trc(ioc, event);
  443. switch (event) {
  444. case IOC_E_ENABLED:
  445. bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
  446. break;
  447. case IOC_E_PFFAILED:
  448. case IOC_E_HWERROR:
  449. /*
  450. * Initialization retry failed.
  451. */
  452. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  453. bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
  454. if (event != IOC_E_PFFAILED)
  455. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
  456. break;
  457. case IOC_E_ENABLE:
  458. break;
  459. case IOC_E_DISABLE:
  460. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  461. break;
  462. case IOC_E_DETACH:
  463. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  464. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  465. break;
  466. default:
  467. bfa_sm_fault(ioc, event);
  468. }
  469. }
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	/* Trace-only entry; failure notification already happened. */
	bfa_trc(ioc, 0);
}
  475. /*
  476. * IOC failure.
  477. */
  478. static void
  479. bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
  480. {
  481. bfa_trc(ioc, event);
  482. switch (event) {
  483. case IOC_E_ENABLE:
  484. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  485. break;
  486. case IOC_E_DISABLE:
  487. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  488. break;
  489. case IOC_E_DETACH:
  490. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  491. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  492. break;
  493. case IOC_E_HWERROR:
  494. /*
  495. * HB failure notification, ignore.
  496. */
  497. break;
  498. default:
  499. bfa_sm_fault(ioc, event);
  500. }
  501. }
  502. /*
  503. * IOCPF State Machine
  504. */
  505. /*
  506. * Reset entry actions -- initialize state machine
  507. */
  508. static void
  509. bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
  510. {
  511. iocpf->fw_mismatch_notified = BFA_FALSE;
  512. iocpf->auto_recover = bfa_auto_recover;
  513. }
  514. /*
  515. * Beginning state. IOC is in reset state.
  516. */
  517. static void
  518. bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  519. {
  520. struct bfa_ioc_s *ioc = iocpf->ioc;
  521. bfa_trc(ioc, event);
  522. switch (event) {
  523. case IOCPF_E_ENABLE:
  524. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
  525. break;
  526. case IOCPF_E_STOP:
  527. break;
  528. default:
  529. bfa_sm_fault(ioc, event);
  530. }
  531. }
/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	/* Request the h/w semaphore; IOCPF_E_SEMLOCKED follows on grant. */
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
  540. /*
  541. * Awaiting h/w semaphore to continue with version check.
  542. */
  543. static void
  544. bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  545. {
  546. struct bfa_ioc_s *ioc = iocpf->ioc;
  547. bfa_trc(ioc, event);
  548. switch (event) {
  549. case IOCPF_E_SEMLOCKED:
  550. if (bfa_ioc_firmware_lock(ioc)) {
  551. if (bfa_ioc_sync_start(ioc)) {
  552. bfa_ioc_sync_join(ioc);
  553. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
  554. } else {
  555. bfa_ioc_firmware_unlock(ioc);
  556. writel(1, ioc->ioc_regs.ioc_sem_reg);
  557. bfa_sem_timer_start(ioc);
  558. }
  559. } else {
  560. writel(1, ioc->ioc_regs.ioc_sem_reg);
  561. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
  562. }
  563. break;
  564. case IOCPF_E_DISABLE:
  565. bfa_sem_timer_stop(ioc);
  566. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  567. bfa_fsm_send_event(ioc, IOC_E_DISABLED);
  568. break;
  569. case IOCPF_E_STOP:
  570. bfa_sem_timer_stop(ioc);
  571. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  572. break;
  573. default:
  574. bfa_sm_fault(ioc, event);
  575. }
  576. }
  577. /*
  578. * Notify enable completion callback.
  579. */
  580. static void
  581. bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
  582. {
  583. /*
  584. * Call only the first time sm enters fwmismatch state.
  585. */
  586. if (iocpf->fw_mismatch_notified == BFA_FALSE)
  587. bfa_ioc_pf_fwmismatch(iocpf->ioc);
  588. iocpf->fw_mismatch_notified = BFA_TRUE;
  589. bfa_iocpf_timer_start(iocpf->ioc);
  590. }
  591. /*
  592. * Awaiting firmware version match.
  593. */
/*
 * IOCPF state: firmware version mismatch.  On each timeout, go back to
 * fwcheck and re-examine the running firmware.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		/* Retry the firmware version check. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  616. /*
  617. * Request for semaphore.
  618. */
/* Entry: kick off (or re-try) acquisition of the h/w init semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			/* All functions synced; join and start h/w init. */
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			/* Not yet synced: release the semaphore (write 1)
			 * and retry after the sem timer fires. */
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry: reset the poll counter and start (non-forced) h/w initialization. */
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}
  656. /*
  657. * Hardware is being initialized. Interrupts are enabled.
  658. * Holding hardware semaphore lock.
  659. */
/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		/* Firmware came up; move on to sending IOC enable. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		/* Init timed out: release semaphore, report PF failure and
		 * go synchronize the failure across functions. */
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		/* Leave the sync group before releasing the semaphore. */
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry: arm the response timer and send the IOC ENABLE mailbox request. */
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);

	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}
  694. /*
  695. * Host IOC function is being enabled, awaiting response from firmware.
  696. * Semaphore is acquired.
  697. */
/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		/* Enable acknowledged: stop timer, release semaphore. */
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		/* Only a genuine timeout notifies the IOC of PF failure;
		 * INITFAIL is reported through a different path. */
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry: the PF is fully up -- tell the IOC state machine. */
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}
/*
 * IOCPF state: operational.  Handles disable requests and failures
 * detected while the function is up.
 */
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		/* Attribute fetch failed during bring-up: treat as init
		 * failure and synchronize it across functions. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry: arm the response timer and send the IOC DISABLE mailbox request. */
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}
  759. /*
  760. * IOC is being disabled
  761. */
/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		/* Firmware did not answer: force the f/w state to FAIL so
		 * other functions see the disable, then synchronize. */
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		/* Late enable response crossing the disable; ignore it. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry: acquire the h/w semaphore so the sync state can be updated. */
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
  792. /*
  793. * IOC hb ack request is being removed.
  794. */
/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		/* Leave the sync group, release the semaphore, done. */
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		/* Already disabling; a failure changes nothing here. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  812. /*
  813. * IOC disable completion entry.
  814. */
/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}
/*
 * IOCPF state: disabled.  Waits for a re-enable or a final stop.
 */
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		/* Final teardown: drop the firmware lock taken in fwcheck. */
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry: acquire the h/w semaphore before recording the init failure. */
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
  842. /*
  843. * Hardware initialization failed.
  844. */
/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		/* Record failure: notify peers, leave sync group, mark the
		 * f/w state FAIL, then release the semaphore. */
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		/* Already handling an init failure; ignore. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry: nothing to do; initfail is a passive wait state. */
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
}
  877. /*
  878. * Hardware initialization failed.
  879. */
/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		/* Final teardown: drop the firmware lock. */
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Entry into fail_sync: halt the LPU, flush pending mailbox commands and
 * start acquiring the h/w semaphore so the failure can be synchronized.
 */
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * IOCPF state: synchronizing a runtime failure across functions.
 * With auto-recovery enabled this re-initializes the hardware;
 * otherwise it parks in the terminal fail state.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			/* No recovery: record FAIL in f/w state, release the
			 * semaphore and stay failed. */
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				/* All functions acked: re-init immediately
				 * (semaphore still held for hwinit). */
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				/* Peers not done: release semaphore and wait
				 * for sync completion in semwait. */
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		/* Already failing; ignore repeated failure events. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry: nothing to do; fail is a terminal wait state. */
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
}
  947. /*
  948. * IOC is in failed state.
  949. */
/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		/* Only a disable can move the PF out of the fail state. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  963. /*
  964. * BFA IOC private functions
  965. */
  966. /*
  967. * Notify common modules registered for notification.
  968. */
  969. static void
  970. bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
  971. {
  972. struct bfa_ioc_notify_s *notify;
  973. struct list_head *qe;
  974. list_for_each(qe, &ioc->notify_q) {
  975. notify = (struct bfa_ioc_notify_s *)qe;
  976. notify->cbfn(notify->cbarg, event);
  977. }
  978. }
/*
 * IOC disable completed: invoke the driver's disable callback and
 * notify all registered modules.
 */
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
  985. bfa_boolean_t
  986. bfa_ioc_sem_get(void __iomem *sem_reg)
  987. {
  988. u32 r32;
  989. int cnt = 0;
  990. #define BFA_SEM_SPINCNT 3000
  991. r32 = readl(sem_reg);
  992. while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
  993. cnt++;
  994. udelay(2);
  995. r32 = readl(sem_reg);
  996. }
  997. if (!(r32 & 1))
  998. return BFA_TRUE;
  999. WARN_ON(cnt >= BFA_SEM_SPINCNT);
  1000. return BFA_FALSE;
  1001. }
/*
 * Attempt a single non-blocking grab of the IOC h/w semaphore.  On
 * success, the IOCPF state machine is told; otherwise the sem timer is
 * started to retry later.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}
  1017. /*
  1018. * Initialize LPU local memory (aka secondary memory / SRAM)
  1019. */
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	/* Clear the init-enable/done bits now that init has completed. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
 * Release LPU 0 from reset so it starts executing downloaded firmware.
 */
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
 * Put both LPUs into reset, halting firmware execution.
 */
static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
  1073. /*
  1074. * Get driver and firmware versions.
  1075. */
/*
 * Get driver and firmware versions.
 *
 * Reads the firmware image header out of shared memory (SMEM) page 0
 * word-by-word into *fwhdr.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	/* Select the SMEM page before reading. */
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
  1093. /*
  1094. * Returns TRUE if same.
  1095. */
/*
 * Returns TRUE if same.
 *
 * Compares the md5 checksum of the running firmware header against the
 * driver's embedded firmware image.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			/* Trace the first differing md5 word and bail. */
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}
  1114. /*
  1115. * Return true if current running version is valid. Firmware signature and
  1116. * execution context (driver/bios) must match.
  1117. */
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	/* bootenv is stored byte-swapped in the f/w header. */
	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	/* Signature and environment match; finally compare md5 sums. */
	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
  1137. /*
  1138. * Conditionally flush any pending message from firmware at start.
  1139. */
/*
 * Conditionally flush any pending message from firmware at start.
 * Writing 1 to the LPU mailbox command register acks/clears it.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
/*
 * Initialize the IOC hardware: decide, based on the current firmware
 * state and version, whether to reboot firmware, wait for an in-progress
 * init, or simply re-attach to already-running firmware.
 *
 * @param[in] ioc    IOC instance
 * @param[in] force  treat f/w state as UNINIT, forcing a full boot
 */
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
}
/*
 * IOC state-machine timer callback: forward a timeout event to the
 * IOC state machine.
 */
static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
/*
 * Copy a message into the host-to-firmware mailbox registers and ring
 * the doorbell.  Unused mailbox words are zeroed.
 *
 * @param[in] ioc      IOC instance
 * @param[in] ioc_msg  message to send (u32-aligned)
 * @param[in] len      message length in bytes (<= BFI_IOC_MSGLEN_MAX)
 */
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	/* NOTE(review): writel() itself performs a cpu-to-LE conversion, so
	 * the explicit cpu_to_le32() here double-swaps on big-endian hosts.
	 * Presumably firmware expects this layout -- verify before changing. */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	/* Read back to flush the posted write. */
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
  1229. static void
  1230. bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
  1231. {
  1232. struct bfi_ioc_ctrl_req_s enable_req;
  1233. struct timeval tv;
  1234. bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
  1235. bfa_ioc_portid(ioc));
  1236. enable_req.clscode = cpu_to_be16(ioc->clscode);
  1237. do_gettimeofday(&tv);
  1238. enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
  1239. bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
  1240. }
/*
 * Send an IOC DISABLE request to firmware via the mailbox.
 */
static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
/*
 * Ask firmware to DMA the IOC attributes into the pre-allocated
 * attr_dma buffer.
 */
static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
  1258. static void
  1259. bfa_ioc_hb_check(void *cbarg)
  1260. {
  1261. struct bfa_ioc_s *ioc = cbarg;
  1262. u32 hb_count;
  1263. hb_count = readl(ioc->ioc_regs.heartbeat);
  1264. if (ioc->hb_count == hb_count) {
  1265. bfa_ioc_recover(ioc);
  1266. return;
  1267. } else {
  1268. ioc->hb_count = hb_count;
  1269. }
  1270. bfa_ioc_mbox_poll(ioc);
  1271. bfa_hb_timer_start(ioc);
  1272. }
/*
 * Snapshot the current firmware heartbeat counter and start the
 * periodic heartbeat check timer.
 */
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}
  1279. /*
  1280. * Initiate a full firmware download.
  1281. */
/*
 * Initiate a full firmware download.
 *
 * Copies the driver's embedded firmware image chunk-by-chunk into IOC
 * shared memory (SMEM), handling page wrap-around, then writes the boot
 * type, environment and device mode words the firmware reads at start.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {

		/* Fetch the next image chunk when we cross a boundary. */
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page register to page 0. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and device mode at the end.
	 */
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				      ioc->port0_mode, ioc->port1_mode);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
		      swab32(asicmode));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
		      swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
		      swab32(boot_env));
}
  1336. /*
  1337. * Update BFA configuration from firmware configuration.
  1338. */
/*
 * Update BFA configuration from firmware configuration.
 * Byte-swaps the multi-byte attribute fields in place, then notifies
 * the IOC state machine.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
	attr->card_type     = be32_to_cpu(attr->card_type);
	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
  1348. /*
  1349. * Attach time initialization of mbox logic.
  1350. */
  1351. static void
  1352. bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
  1353. {
  1354. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1355. int mc;
  1356. INIT_LIST_HEAD(&mod->cmd_q);
  1357. for (mc = 0; mc < BFI_MC_MAX; mc++) {
  1358. mod->mbhdlr[mc].cbfn = NULL;
  1359. mod->mbhdlr[mc].cbarg = ioc->bfa;
  1360. }
  1361. }
  1362. /*
  1363. * Mbox poll timer -- restarts any pending mailbox requests.
  1364. */
/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32			stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
  1388. /*
  1389. * Cleanup any pending requests.
  1390. */
/*
 * Cleanup any pending requests.
 * Drains the mailbox command queue without sending anything; called on
 * heartbeat failure.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
  1399. /*
  1400. * Read data from SMEM to host through PCI memmap
  1401. *
  1402. * @param[in] ioc memory for IOC
  1403. * @param[in] tbuf app memory to store data from smem
  1404. * @param[in] soff smem offset
  1405. * @param[in] sz size of smem in bytes
  1406. */
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 *
 * Returns BFA_STATUS_OK, or BFA_STATUS_FAILED if the init semaphore
 * could not be acquired.
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		/* SMEM words are big-endian; convert on the way out. */
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
  1451. /*
  1452. * Clear SMEM data from host through PCI memmap
  1453. *
  1454. * @param[in] ioc memory for IOC
  1455. * @param[in] soff smem offset
  1456. * @param[in] sz size of smem in bytes
  1457. */
/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 *
 * Returns BFA_STATUS_OK, or BFA_STATUS_FAILED if the init semaphore
 * could not be acquired.
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * Report an IOC failure: invoke the driver heartbeat-failure callback,
 * notify registered modules, save the firmware trace and log the event.
 */
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);

	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
}
/*
 * Notify the driver that IOC enable failed due to a firmware/driver
 * version mismatch, and log a warning.
 */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
}
/*
 * Initialize the ASIC PLL while holding the chip init semaphore.
 * Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	/* NOTE(review): the return value of bfa_ioc_sem_get() is ignored, so
	 * PLL init proceeds even if the semaphore spin timed out -- confirm
	 * this best-effort behavior is intentional. */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;
	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
  1539. /*
  1540. * Interface used by diag module to do firmware boot with memory test
  1541. * as the entry vector.
  1542. */
/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 *
 * Initializes the PLL, sets the f/w state registers for both functions,
 * downloads the firmware image and releases the LPU from reset.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
	} else {
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	bfa_ioc_lpu_start(ioc);
}
  1563. /*
  1564. * Enable/disable IOC failure auto recovery.
  1565. */
/*
 * Enable/disable IOC failure auto recovery.
 * Sets the module-wide flag consulted when a failure is synchronized.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}
/* Returns TRUE if the IOC state machine is in the operational state. */
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
  1576. bfa_boolean_t
  1577. bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
  1578. {
  1579. u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
  1580. return ((r32 != BFI_IOC_UNINIT) &&
  1581. (r32 != BFI_IOC_INITING) &&
  1582. (r32 != BFI_IOC_MEMTEST));
  1583. }
/*
 * Fetch a pending firmware-to-host mailbox message, if any.
 *
 * @param[out] mbmsg  buffer receiving the message (big-endian words)
 *
 * Returns BFA_TRUE if a message was read, BFA_FALSE if none pending.
 */
bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int	i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return BFA_FALSE;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	/* Read back to flush the posted write. */
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return BFA_TRUE;
}
/*
 * Dispatch a firmware-to-host IOC message to the appropriate handler:
 * heartbeats are absorbed, enable/disable replies go to the IOCPF state
 * machine, getattr replies update the cached attributes.
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		/* Unexpected message id from firmware. */
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}
  1633. /*
  1634. * IOC attach time initialization and setup.
  1635. *
  1636. * @param[in] ioc memory for IOC
  1637. * @param[in] bfa driver instance structure
  1638. */
  1639. void
  1640. bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
  1641. struct bfa_timer_mod_s *timer_mod)
  1642. {
  1643. ioc->bfa = bfa;
  1644. ioc->cbfn = cbfn;
  1645. ioc->timer_mod = timer_mod;
  1646. ioc->fcmode = BFA_FALSE;
  1647. ioc->pllinit = BFA_FALSE;
  1648. ioc->dbg_fwsave_once = BFA_TRUE;
  1649. ioc->iocpf.ioc = ioc;
  1650. bfa_ioc_mbox_attach(ioc);
  1651. INIT_LIST_HEAD(&ioc->notify_q);
  1652. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  1653. bfa_fsm_send_event(ioc, IOC_E_RESET);
  1654. }
  1655. /*
  1656. * Driver detach time IOC cleanup.
  1657. */
  1658. void
  1659. bfa_ioc_detach(struct bfa_ioc_s *ioc)
  1660. {
  1661. bfa_fsm_send_event(ioc, IOC_E_DETACH);
  1662. }
  1663. /*
  1664. * Setup IOC PCI properties.
  1665. *
  1666. * @param[in] pcidev PCI device information for this IOC
  1667. */
  1668. void
  1669. bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
  1670. enum bfi_pcifn_class clscode)
  1671. {
  1672. ioc->clscode = clscode;
  1673. ioc->pcidev = *pcidev;
  1674. /*
  1675. * Initialize IOC and device personality
  1676. */
  1677. ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
  1678. ioc->asic_mode = BFI_ASIC_MODE_FC;
  1679. switch (pcidev->device_id) {
  1680. case BFA_PCI_DEVICE_ID_FC_8G1P:
  1681. case BFA_PCI_DEVICE_ID_FC_8G2P:
  1682. ioc->asic_gen = BFI_ASIC_GEN_CB;
  1683. break;
  1684. case BFA_PCI_DEVICE_ID_CT:
  1685. ioc->asic_gen = BFI_ASIC_GEN_CT;
  1686. ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
  1687. ioc->asic_mode = BFI_ASIC_MODE_ETH;
  1688. break;
  1689. case BFA_PCI_DEVICE_ID_CT_FC:
  1690. ioc->asic_gen = BFI_ASIC_GEN_CT;
  1691. break;
  1692. case BFA_PCI_DEVICE_ID_CT2:
  1693. ioc->asic_gen = BFI_ASIC_GEN_CT2;
  1694. if (clscode == BFI_PCIFN_CLASS_FC)
  1695. ioc->asic_mode = BFI_ASIC_MODE_FC16;
  1696. else {
  1697. ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
  1698. ioc->asic_mode = BFI_ASIC_MODE_ETH;
  1699. }
  1700. break;
  1701. default:
  1702. WARN_ON(1);
  1703. }
  1704. /*
  1705. * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
  1706. */
  1707. if (ioc->asic_gen == BFI_ASIC_GEN_CB)
  1708. bfa_ioc_set_cb_hwif(ioc);
  1709. else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
  1710. bfa_ioc_set_ct_hwif(ioc);
  1711. else {
  1712. WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
  1713. bfa_ioc_set_ct2_hwif(ioc);
  1714. bfa_ioc_ct2_poweron(ioc);
  1715. }
  1716. bfa_ioc_map_port(ioc);
  1717. bfa_ioc_reg_init(ioc);
  1718. }
  1719. /*
  1720. * Initialize IOC dma memory
  1721. *
  1722. * @param[in] dm_kva kernel virtual address of IOC dma memory
  1723. * @param[in] dm_pa physical address of IOC dma memory
  1724. */
  1725. void
  1726. bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
  1727. {
  1728. /*
  1729. * dma memory for firmware attribute
  1730. */
  1731. ioc->attr_dma.kva = dm_kva;
  1732. ioc->attr_dma.pa = dm_pa;
  1733. ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
  1734. }
  1735. void
  1736. bfa_ioc_enable(struct bfa_ioc_s *ioc)
  1737. {
  1738. bfa_ioc_stats(ioc, ioc_enables);
  1739. ioc->dbg_fwsave_once = BFA_TRUE;
  1740. bfa_fsm_send_event(ioc, IOC_E_ENABLE);
  1741. }
  1742. void
  1743. bfa_ioc_disable(struct bfa_ioc_s *ioc)
  1744. {
  1745. bfa_ioc_stats(ioc, ioc_disables);
  1746. bfa_fsm_send_event(ioc, IOC_E_DISABLE);
  1747. }
  1748. /*
  1749. * Initialize memory for saving firmware trace. Driver must initialize
  1750. * trace memory before call bfa_ioc_enable().
  1751. */
  1752. void
  1753. bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
  1754. {
  1755. ioc->dbg_fwsave = dbg_fwsave;
  1756. ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
  1757. }
  1758. /*
  1759. * Register mailbox message handler functions
  1760. *
  1761. * @param[in] ioc IOC instance
  1762. * @param[in] mcfuncs message class handler functions
  1763. */
  1764. void
  1765. bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
  1766. {
  1767. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1768. int mc;
  1769. for (mc = 0; mc < BFI_MC_MAX; mc++)
  1770. mod->mbhdlr[mc].cbfn = mcfuncs[mc];
  1771. }
  1772. /*
  1773. * Register mailbox message handler function, to be called by common modules
  1774. */
  1775. void
  1776. bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
  1777. bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
  1778. {
  1779. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1780. mod->mbhdlr[mc].cbfn = cbfn;
  1781. mod->mbhdlr[mc].cbarg = cbarg;
  1782. }
/*
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in] ioc IOC instance
 * @param[in] cmd Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	u32				stat;

	/*
	 * If a previous command is pending, queue new command
	 * (preserves ordering behind the in-flight command)
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
  1815. /*
  1816. * Handle mailbox interrupts
  1817. */
  1818. void
  1819. bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
  1820. {
  1821. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1822. struct bfi_mbmsg_s m;
  1823. int mc;
  1824. if (!bfa_ioc_msgget(ioc, &m))
  1825. return;
  1826. /*
  1827. * Treat IOC message class as special.
  1828. */
  1829. mc = m.mh.msg_class;
  1830. if (mc == BFI_MC_IOC) {
  1831. bfa_ioc_isr(ioc, &m);
  1832. return;
  1833. }
  1834. if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
  1835. return;
  1836. mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
  1837. }
  1838. void
  1839. bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
  1840. {
  1841. bfa_fsm_send_event(ioc, IOC_E_HWERROR);
  1842. }
  1843. void
  1844. bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
  1845. {
  1846. ioc->fcmode = BFA_TRUE;
  1847. }
  1848. /*
  1849. * return true if IOC is disabled
  1850. */
  1851. bfa_boolean_t
  1852. bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
  1853. {
  1854. return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
  1855. bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
  1856. }
  1857. /*
  1858. * return true if IOC firmware is different.
  1859. */
  1860. bfa_boolean_t
  1861. bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
  1862. {
  1863. return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
  1864. bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
  1865. bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
  1866. }
/*
 * True when a raw firmware-state register value (__sm) counts as
 * "disabled" for the purposes of adapter-level checks.
 */
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
  1874. /*
  1875. * Check if adapter is disabled -- both IOCs should be in a disabled
  1876. * state.
  1877. */
  1878. bfa_boolean_t
  1879. bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
  1880. {
  1881. u32 ioc_state;
  1882. if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
  1883. return BFA_FALSE;
  1884. ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
  1885. if (!bfa_ioc_state_disabled(ioc_state))
  1886. return BFA_FALSE;
  1887. if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
  1888. ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
  1889. if (!bfa_ioc_state_disabled(ioc_state))
  1890. return BFA_FALSE;
  1891. }
  1892. return BFA_TRUE;
  1893. }
  1894. /*
  1895. * Reset IOC fwstate registers.
  1896. */
  1897. void
  1898. bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
  1899. {
  1900. writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
  1901. writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
  1902. }
/* manufacturer name reported in adapter attributes and model strings */
#define BFA_MFG_NAME "Brocade"
  1904. void
  1905. bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
  1906. struct bfa_adapter_attr_s *ad_attr)
  1907. {
  1908. struct bfi_ioc_attr_s *ioc_attr;
  1909. ioc_attr = ioc->attr;
  1910. bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
  1911. bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
  1912. bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
  1913. bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
  1914. memcpy(&ad_attr->vpd, &ioc_attr->vpd,
  1915. sizeof(struct bfa_mfg_vpd_s));
  1916. ad_attr->nports = bfa_ioc_get_nports(ioc);
  1917. ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
  1918. bfa_ioc_get_adapter_model(ioc, ad_attr->model);
  1919. /* For now, model descr uses same model string */
  1920. bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
  1921. ad_attr->card_type = ioc_attr->card_type;
  1922. ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
  1923. if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
  1924. ad_attr->prototype = 1;
  1925. else
  1926. ad_attr->prototype = 0;
  1927. ad_attr->pwwn = ioc->attr->pwwn;
  1928. ad_attr->mac = bfa_ioc_get_mac(ioc);
  1929. ad_attr->pcie_gen = ioc_attr->pcie_gen;
  1930. ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
  1931. ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
  1932. ad_attr->asic_rev = ioc_attr->asic_rev;
  1933. bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
  1934. ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
  1935. ad_attr->trunk_capable = (ad_attr->nports > 1) &&
  1936. !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
  1937. }
  1938. enum bfa_ioc_type_e
  1939. bfa_ioc_get_type(struct bfa_ioc_s *ioc)
  1940. {
  1941. enum bfi_port_mode mode;
  1942. if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
  1943. return BFA_IOC_TYPE_LL;
  1944. WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
  1945. mode = (ioc->port_id == 0) ? ioc->port0_mode : ioc->port1_mode;
  1946. return (mode == BFI_PORT_MODE_FC)
  1947. ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
  1948. }
  1949. void
  1950. bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
  1951. {
  1952. memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
  1953. memcpy((void *)serial_num,
  1954. (void *)ioc->attr->brcd_serialnum,
  1955. BFA_ADAPTER_SERIAL_NUM_LEN);
  1956. }
  1957. void
  1958. bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
  1959. {
  1960. memset((void *)fw_ver, 0, BFA_VERSION_LEN);
  1961. memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
  1962. }
  1963. void
  1964. bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
  1965. {
  1966. WARN_ON(!chip_rev);
  1967. memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
  1968. chip_rev[0] = 'R';
  1969. chip_rev[1] = 'e';
  1970. chip_rev[2] = 'v';
  1971. chip_rev[3] = '-';
  1972. chip_rev[4] = ioc->attr->asic_rev;
  1973. chip_rev[5] = '\0';
  1974. }
  1975. void
  1976. bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
  1977. {
  1978. memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
  1979. memcpy(optrom_ver, ioc->attr->optrom_version,
  1980. BFA_VERSION_LEN);
  1981. }
  1982. void
  1983. bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
  1984. {
  1985. memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
  1986. memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
  1987. }
  1988. void
  1989. bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
  1990. {
  1991. struct bfi_ioc_attr_s *ioc_attr;
  1992. WARN_ON(!model);
  1993. memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
  1994. ioc_attr = ioc->attr;
  1995. /*
  1996. * model name
  1997. */
  1998. snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
  1999. BFA_MFG_NAME, ioc_attr->card_type);
  2000. }
  2001. enum bfa_ioc_state
  2002. bfa_ioc_get_state(struct bfa_ioc_s *ioc)
  2003. {
  2004. enum bfa_iocpf_state iocpf_st;
  2005. enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
  2006. if (ioc_st == BFA_IOC_ENABLING ||
  2007. ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
  2008. iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
  2009. switch (iocpf_st) {
  2010. case BFA_IOCPF_SEMWAIT:
  2011. ioc_st = BFA_IOC_SEMWAIT;
  2012. break;
  2013. case BFA_IOCPF_HWINIT:
  2014. ioc_st = BFA_IOC_HWINIT;
  2015. break;
  2016. case BFA_IOCPF_FWMISMATCH:
  2017. ioc_st = BFA_IOC_FWMISMATCH;
  2018. break;
  2019. case BFA_IOCPF_FAIL:
  2020. ioc_st = BFA_IOC_FAIL;
  2021. break;
  2022. case BFA_IOCPF_INITFAIL:
  2023. ioc_st = BFA_IOC_INITFAIL;
  2024. break;
  2025. default:
  2026. break;
  2027. }
  2028. }
  2029. return ioc_st;
  2030. }
  2031. void
  2032. bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
  2033. {
  2034. memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
  2035. ioc_attr->state = bfa_ioc_get_state(ioc);
  2036. ioc_attr->port_id = ioc->port_id;
  2037. ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
  2038. bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
  2039. ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
  2040. ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
  2041. bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
  2042. }
  2043. mac_t
  2044. bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
  2045. {
  2046. /*
  2047. * Check the IOC type and return the appropriate MAC
  2048. */
  2049. if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
  2050. return ioc->attr->fcoe_mac;
  2051. else
  2052. return ioc->attr->mac;
  2053. }
  2054. mac_t
  2055. bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
  2056. {
  2057. mac_t m;
  2058. m = ioc->attr->mfg_mac;
  2059. if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
  2060. m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
  2061. else
  2062. bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
  2063. bfa_ioc_pcifn(ioc));
  2064. return m;
  2065. }
  2066. bfa_boolean_t
  2067. bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
  2068. {
  2069. return ioc->fcmode || bfa_asic_id_cb(ioc->pcidev.device_id);
  2070. }
  2071. /*
  2072. * Retrieve saved firmware trace from a prior IOC failure.
  2073. */
  2074. bfa_status_t
  2075. bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
  2076. {
  2077. int tlen;
  2078. if (ioc->dbg_fwsave_len == 0)
  2079. return BFA_STATUS_ENOFSAVE;
  2080. tlen = *trclen;
  2081. if (tlen > ioc->dbg_fwsave_len)
  2082. tlen = ioc->dbg_fwsave_len;
  2083. memcpy(trcdata, ioc->dbg_fwsave, tlen);
  2084. *trclen = tlen;
  2085. return BFA_STATUS_OK;
  2086. }
  2087. /*
  2088. * Retrieve saved firmware trace from a prior IOC failure.
  2089. */
  2090. bfa_status_t
  2091. bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
  2092. {
  2093. u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
  2094. int tlen;
  2095. bfa_status_t status;
  2096. bfa_trc(ioc, *trclen);
  2097. tlen = *trclen;
  2098. if (tlen > BFA_DBG_FWTRC_LEN)
  2099. tlen = BFA_DBG_FWTRC_LEN;
  2100. status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
  2101. *trclen = tlen;
  2102. return status;
  2103. }
  2104. static void
  2105. bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
  2106. {
  2107. struct bfa_mbox_cmd_s cmd;
  2108. struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
  2109. bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
  2110. bfa_ioc_portid(ioc));
  2111. req->clscode = cpu_to_be16(ioc->clscode);
  2112. bfa_ioc_mbox_queue(ioc, &cmd);
  2113. }
/*
 * Send a fw sync command and busy-wait (bounded) for it to drain.
 */
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending a fw sync mbox command wait for it to
	 * take effect. We will not wait for a response because
	 * 1. fw_sync mbox cmd doesn't have a response.
	 * 2. Even if we implement that, interrupts might not
	 * be enabled when we call this function.
	 * So, just keep checking if any mbox cmd is pending, and
	 * after waiting for a reasonable amount of time, go ahead.
	 * It is possible that fw has crashed and the mbox command
	 * is never acknowledged.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}
/*
 * Dump firmware smem
 *
 * Reads one chunk of firmware shared memory per call; the caller
 * iterates by passing back the updated *offset.
 *
 * @param[out]    buf    destination buffer
 * @param[in,out] offset in: smem offset to read from; out: next offset
 *                       (wraps to 0 once the end of smem is reached)
 * @param[in,out] buflen in: requested bytes; out: bytes actually read
 *                       (clamped at the end of smem); zeroed with
 *                       *offset on error
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
		     u32 *offset, int *buflen)
{
	u32		loff;
	int		dlen;
	bfa_status_t	status;
	u32		smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	/* offset past the end of smem is a caller error */
	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * First smem read, sync smem before proceeding
	 * No need to sync before reading every chunk.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	*offset += dlen;

	if (*offset >= smem_len)
		*offset = 0;

	*buflen = dlen;

	return status;
}
  2169. /*
  2170. * Firmware statistics
  2171. */
  2172. bfa_status_t
  2173. bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
  2174. {
  2175. u32 loff = BFI_IOC_FWSTATS_OFF + \
  2176. BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
  2177. int tlen;
  2178. bfa_status_t status;
  2179. if (ioc->stats_busy) {
  2180. bfa_trc(ioc, ioc->stats_busy);
  2181. return BFA_STATUS_DEVBUSY;
  2182. }
  2183. ioc->stats_busy = BFA_TRUE;
  2184. tlen = sizeof(struct bfa_fw_stats_s);
  2185. status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
  2186. ioc->stats_busy = BFA_FALSE;
  2187. return status;
  2188. }
  2189. bfa_status_t
  2190. bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
  2191. {
  2192. u32 loff = BFI_IOC_FWSTATS_OFF + \
  2193. BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
  2194. int tlen;
  2195. bfa_status_t status;
  2196. if (ioc->stats_busy) {
  2197. bfa_trc(ioc, ioc->stats_busy);
  2198. return BFA_STATUS_DEVBUSY;
  2199. }
  2200. ioc->stats_busy = BFA_TRUE;
  2201. tlen = sizeof(struct bfa_fw_stats_s);
  2202. status = bfa_ioc_smem_clr(ioc, loff, tlen);
  2203. ioc->stats_busy = BFA_FALSE;
  2204. return status;
  2205. }
  2206. /*
  2207. * Save firmware trace if configured.
  2208. */
  2209. static void
  2210. bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
  2211. {
  2212. int tlen;
  2213. if (ioc->dbg_fwsave_once) {
  2214. ioc->dbg_fwsave_once = BFA_FALSE;
  2215. if (ioc->dbg_fwsave_len) {
  2216. tlen = ioc->dbg_fwsave_len;
  2217. bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
  2218. }
  2219. }
  2220. }
  2221. /*
  2222. * Firmware failure detected. Start recovery actions.
  2223. */
  2224. static void
  2225. bfa_ioc_recover(struct bfa_ioc_s *ioc)
  2226. {
  2227. bfa_ioc_stats(ioc, ioc_hbfails);
  2228. bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
  2229. }
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	/*
	 * Link Layer (Ethernet) IOCs have no WWNs to check.
	 * NOTE(review): for non-LL IOCs the body is currently empty as
	 * well -- any actual WWN validation appears to live elsewhere or
	 * to have been removed; confirm before extending.
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
  2236. /*
  2237. * BFA IOC PF private functions
  2238. */
  2239. static void
  2240. bfa_iocpf_timeout(void *ioc_arg)
  2241. {
  2242. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  2243. bfa_trc(ioc, 0);
  2244. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
  2245. }
  2246. static void
  2247. bfa_iocpf_sem_timeout(void *ioc_arg)
  2248. {
  2249. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  2250. bfa_ioc_hw_sem_get(ioc);
  2251. }
/*
 * Poll the firmware state register while waiting for firmware init to
 * settle; gives up via the IOCPF timeout after BFA_IOC_TOV.
 */
static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	bfa_trc(ioc, fwstate);

	/* firmware reached DISABLED -- report readiness to the FSM */
	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
		bfa_iocpf_timeout(ioc);
	else {
		/* not there yet: account the interval and re-arm the poll */
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		bfa_iocpf_poll_timer_start(ioc);
	}
}
  2268. static void
  2269. bfa_iocpf_poll_timeout(void *ioc_arg)
  2270. {
  2271. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  2272. bfa_ioc_poll_fwinit(ioc);
  2273. }
/*
 * bfa timer function
 *
 * Called once per BFA_TIMER_FREQ tick: ages every armed timer, moves
 * the expired ones onto a private list, then fires their callbacks.
 * Callbacks run only after the walk completes, so they may safely
 * re-arm or stop timers.
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
	struct list_head *qh = &mod->timer_q;
	struct list_head *qe, *qe_next;
	struct bfa_timer_s *elem;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	/* safe-iteration walk: qe_next is cached before qe may be unlinked */
	qe = bfa_q_next(qh);

	while (qe != qh) {
		qe_next = bfa_q_next(qe);

		elem = (struct bfa_timer_s *) qe;
		if (elem->timeout <= BFA_TIMER_FREQ) {
			elem->timeout = 0;
			list_del(&elem->qe);
			list_add_tail(&elem->qe, &timedout_q);
		} else {
			elem->timeout -= BFA_TIMER_FREQ;
		}

		qe = qe_next;	/* go to next elem */
	}

	/*
	 * Pop all the timeout entries
	 */
	while (!list_empty(&timedout_q)) {
		bfa_q_deq(&timedout_q, &elem);
		elem->timercb(elem->arg);
	}
}
  2306. /*
  2307. * Should be called with lock protection
  2308. */
  2309. void
  2310. bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
  2311. void (*timercb) (void *), void *arg, unsigned int timeout)
  2312. {
  2313. WARN_ON(timercb == NULL);
  2314. WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
  2315. timer->timeout = timeout;
  2316. timer->timercb = timercb;
  2317. timer->arg = arg;
  2318. list_add_tail(&timer->qe, &mod->timer_q);
  2319. }
  2320. /*
  2321. * Should be called with lock protection
  2322. */
  2323. void
  2324. bfa_timer_stop(struct bfa_timer_s *timer)
  2325. {
  2326. WARN_ON(list_empty(&timer->qe));
  2327. list_del(&timer->qe);
  2328. }