bfa_ioc.c

/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	5
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

#define bfa_ioc_timer_start(__ioc) \
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
		bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc) \
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
		bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */
#define bfa_ioc_firmware_lock(__ioc) \
	((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc) \
	((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
	((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc) \
	((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
	((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
	((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc) \
	((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc) \
	((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc) \
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
	readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request */
	IOC_E_ENABLE		= 2,	/* IOC enable request */
	IOC_E_DISABLE		= 3,	/* IOC disable request */
	IOC_E_DETACH		= 4,	/* driver detach cleanup */
	IOC_E_ENABLED		= 5,	/* f/w enabled */
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response */
	IOC_E_DISABLED		= 7,	/* f/w disabled */
	IOC_E_INITFAILED	= 8,	/* failure notice by iocpf sm */
	IOC_E_PFFAILED		= 9,	/* failure notice by iocpf sm */
	IOC_E_HBFAIL		= 10,	/* heartbeat failure */
	IOC_E_HWERROR		= 11,	/* hardware error interrupt */
	IOC_E_TIMEOUT		= 12,	/* timeout */
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};

/*
 * IOCPF state machine definitions/declarations
 */
#define bfa_iocpf_timer_start(__ioc) \
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
		bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_recovery_timer_start(__ioc) \
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
		bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)

#define bfa_sem_timer_start(__ioc) \
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
		bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request */
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request */
	IOCPF_E_STOP		= 3,	/* stop on driver detach */
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done */
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response */
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response */
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm */
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm */
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm */
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked */
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout */
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
}
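
/*
 * IOC is operational. Heartbeat is being monitored.
 */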
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		bfa_ioc_fail_notify(ioc);
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}
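
/*
 * IOC is disabled.
 */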
static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_INITFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->retry_count = 0;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				iocpf->retry_count = 0;
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->retry_count++;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}
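
/*
 * IOCPF is enabled and operational.
 */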
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	case IOCPF_E_FWREADY:
		if (bfa_ioc_is_operational(ioc)) {
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		} else {
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		}
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}
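
/*
 * IOCPF is disabled.
 */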
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		iocpf->retry_count = 0;
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_ack(ioc);
		iocpf->retry_count++;
		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
			bfa_ioc_sync_leave(ioc);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}
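
/*
 * Awaiting h/w semaphore to synchronize the failure with other
 * IOC functions before retrying or settling into failed state.
 */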
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		iocpf->retry_count = 0;
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/*
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}
}
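
/*
 * Try to acquire the given h/w semaphore, spinning for up to
 * BFA_SEM_SPINCNT attempts. Returns BFA_TRUE if acquired.
 */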
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	WARN_ON(cnt >= BFA_SEM_SPINCNT);
	return BFA_FALSE;
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32 r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (swab32(fwhdr.param) != boot_env) {
		bfa_trc(ioc, fwhdr.param);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
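
/*
 * Initialize IOC hardware: reuse a valid running firmware where
 * possible, otherwise (re)boot the firmware image.
 */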
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_BOOT_TYPE_NORMAL;
	boot_env = BFI_BOOT_LOADER_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
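
/*
 * Write a message to the host-to-firmware mailbox, zero-fill the
 * unused mailbox words, and ring the doorbell to raise an LPU event.
 */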
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
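
/*
 * Send an IOC enable request to firmware, stamped with the current time.
 */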
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	do_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
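
/*
 * Request IOC attributes; firmware delivers the reply into the
 * attribute DMA buffer whose address is passed in the request.
 */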
static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
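
/*
 * Heartbeat timer callback -- initiates recovery if the firmware
 * heartbeat counter has not advanced since the last check.
 */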
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s *ioc = cbarg;
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}
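
/*
 * Start heartbeat monitoring with the current heartbeat counter value.
 */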
  1296. static void
  1297. bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
  1298. {
  1299. ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
  1300. bfa_hb_timer_start(ioc);
  1301. }
  1302. /*
  1303. * Initiate a full firmware download.
  1304. */
  1305. static void
  1306. bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
  1307. u32 boot_env)
  1308. {
  1309. u32 *fwimg;
  1310. u32 pgnum, pgoff;
  1311. u32 loff = 0;
  1312. u32 chunkno = 0;
  1313. u32 i;
  1314. /*
  1315. * Initialize LMEM first before code download
  1316. */
  1317. bfa_ioc_lmem_init(ioc);
  1318. bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
  1319. fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
  1320. pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
  1321. pgoff = PSS_SMEM_PGOFF(loff);
  1322. writel(pgnum, ioc->ioc_regs.host_page_num_fn);
  1323. for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
  1324. if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
  1325. chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
  1326. fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
  1327. BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
  1328. }
  1329. /*
  1330. * write smem
  1331. */
  1332. bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
  1333. fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
  1334. loff += sizeof(u32);
  1335. /*
  1336. * handle page offset wrap around
  1337. */
  1338. loff = PSS_SMEM_PGOFF(loff);
  1339. if (loff == 0) {
  1340. pgnum++;
  1341. writel(pgnum, ioc->ioc_regs.host_page_num_fn);
  1342. }
  1343. }
  1344. writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
  1345. ioc->ioc_regs.host_page_num_fn);
  1346. /*
  1347. * Set boot type and boot param at the end.
  1348. */
  1349. bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
  1350. swab32(boot_type));
  1351. bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
  1352. swab32(boot_env));
  1353. }
  1354. /*
  1355. * Update BFA configuration from firmware configuration.
  1356. */
  1357. static void
  1358. bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
  1359. {
  1360. struct bfi_ioc_attr_s *attr = ioc->attr;
  1361. attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
  1362. attr->card_type = be32_to_cpu(attr->card_type);
  1363. attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
  1364. bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
  1365. }
  1366. /*
  1367. * Attach time initialization of mbox logic.
  1368. */
  1369. static void
  1370. bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
  1371. {
  1372. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1373. int mc;
  1374. INIT_LIST_HEAD(&mod->cmd_q);
  1375. for (mc = 0; mc < BFI_MC_MAX; mc++) {
  1376. mod->mbhdlr[mc].cbfn = NULL;
  1377. mod->mbhdlr[mc].cbarg = ioc->bfa;
  1378. }
  1379. }
  1380. /*
  1381. * Mbox poll timer -- restarts any pending mailbox requests.
  1382. */
  1383. static void
  1384. bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
  1385. {
  1386. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1387. struct bfa_mbox_cmd_s *cmd;
  1388. u32 stat;
  1389. /*
  1390. * If no command pending, do nothing
  1391. */
  1392. if (list_empty(&mod->cmd_q))
  1393. return;
  1394. /*
  1395. * If previous command is not yet fetched by firmware, do nothing
  1396. */
  1397. stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
  1398. if (stat)
  1399. return;
  1400. /*
  1401. * Enqueue command to firmware.
  1402. */
  1403. bfa_q_deq(&mod->cmd_q, &cmd);
  1404. bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
  1405. }
  1406. /*
  1407. * Cleanup any pending requests.
  1408. */
  1409. static void
  1410. bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
  1411. {
  1412. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1413. struct bfa_mbox_cmd_s *cmd;
  1414. while (!list_empty(&mod->cmd_q))
  1415. bfa_q_deq(&mod->cmd_q, &cmd);
  1416. }

/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	number of bytes to read
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}

/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	number of bytes to clear
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
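
/*
 * IOC heartbeat failure handler: notify the driver and all registered
 * modules, then save the firmware trace for later retrieval.
 */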
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct list_head		*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}

	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
}
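
/*
 * Firmware/driver version mismatch: complete the pending enable request
 * with a failure status and log a warning.
 */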
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
}
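
/*
 * Run ASIC-specific PLL initialization while holding the chip init
 * semaphore, so no other function touches the chip meanwhile.
 */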
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);

	/*
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}

/*
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}
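
/*
 * Return true if the IOC state machine is in the operational state.
 */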
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
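
/*
 * Return true once the firmware state has moved past the uninit,
 * initing, and memtest states.
 */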
bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);

	return ((r32 != BFI_IOC_UNINIT) &&
		(r32 != BFI_IOC_INITING) &&
		(r32 != BFI_IOC_MEMTEST));
}
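
/*
 * Copy an incoming message out of the LPU-to-host mailbox registers and
 * acknowledge the mailbox interrupt.
 */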
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int	i;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}
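
/*
 * Dispatch IOC-class firmware messages to the corresponding FSM events.
 */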
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;
	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}

/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->timer_mod	= timer_mod;
	ioc->fcmode	= BFA_FALSE;
	ioc->pllinit	= BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc	= ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}

/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc	= mc;
	ioc->pcidev	= *pcidev;
	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna	= ioc->ctdev && !ioc->fcmode;

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
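
/*
 * Enable the IOC: re-arm the one-shot firmware trace save and kick the
 * state machine.
 */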
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave	    = dbg_fwsave;
	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/*
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int				mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/*
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg	= cbarg;
}

/*
 * Queue a mailbox command request to firmware. If the mailbox is busy,
 * the command is queued and sent later by the poll timer. Caller is
 * responsible for serialization.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	u32				stat;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
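
/*
 * Usage note (sketch): callers typically embed a struct bfa_mbox_cmd_s,
 * build a BFI request in cmd->msg with bfi_h2i_set(), and hand it to
 * bfa_ioc_mbox_queue() -- see bfa_ioc_send_fwsync() below for an
 * in-file example.
 */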

/*
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s		m;
	int				mc;

	bfa_ioc_msgget(ioc, &m);

	/*
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	/*
	 * mbhdlr[] has BFI_MC_MAX entries; reject out-of-range classes.
	 */
	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}
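
/*
 * Hardware error interrupt -- drive the IOC state machine into failure
 * handling.
 */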
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode  = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}

/*
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
	       bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/*
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
	       bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
	       bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32	ioc_state;
	void __iomem *rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
		if (!bfa_ioc_state_disabled(ioc_state))
			return BFA_FALSE;
	}

	return BFA_TRUE;
}

/*
 * Reset IOC fwstate registers.
 */
void
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
{
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
}

#define BFA_MFG_NAME "Brocade"

void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
	       sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = ioc->attr->pwwn;
	ad_attr->mac  = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
				 !ad_attr->is_mezz;
}

enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		WARN_ON(ioc->ioc_mc != BFI_MC_LL);
		return BFA_IOC_TYPE_LL;
	}
}

void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy((void *)serial_num,
	       (void *)ioc->attr->brcd_serialnum,
	       BFA_ADAPTER_SERIAL_NUM_LEN);
}

void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	WARN_ON(!chip_rev);

	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
	       BFA_VERSION_LEN);
}

void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	WARN_ON(!model);
	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/*
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		 BFA_MFG_NAME, ioc_attr->card_type);
}
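
/*
 * Report the externally visible IOC state; while enabling or failing,
 * refine it with the IOC PF sub-state.
 */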
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}

	return ioc_st;
}

void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	/*
	 * Check the IOC type and return the appropriate MAC
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return ioc->attr->fcoe_mac;
	else
		return ioc->attr->mac;
}
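
/*
 * Derive this function's MAC address from the manufacturing (factory)
 * MAC.
 */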
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t	m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
					  bfa_ioc_pcifn(ioc));

	return m;
}

bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}

/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int	tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

/*
 * Retrieve the current firmware trace from IOC shared memory.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	bfa_trc(ioc, *trclen);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;

	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
}
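
/*
 * Post a firmware sync mailbox command; no reply is expected.
 */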
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_queue(ioc, &cmd);
}

static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending a fw sync mbox command wait for it to
	 * take effect.  We will not wait for a response because
	 *    1. fw_sync mbox cmd doesn't have a response.
	 *    2. Even if we implement that, interrupts might not
	 *	 be enabled when we call this function.
	 * So, just keep checking if any mbox cmd is pending, and
	 * after waiting for a reasonable amount of time, go ahead.
	 * It is possible that fw has crashed and the mbox command
	 * is never acknowledged.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}

/*
 * Dump firmware smem
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
		     u32 *offset, int *buflen)
{
	u32 loff;
	int dlen;
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * First smem read, sync smem before proceeding
	 * No need to sync before reading every chunk.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	*offset += dlen;
	if (*offset >= smem_len)
		*offset = 0;

	*buflen = dlen;
	return status;
}

/*
 * Firmware statistics
 */
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}

bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_clr(ioc, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}

/*
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
	int	tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}

/*
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}

/*
 * BFA IOC PF private functions
 */
static void
bfa_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
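
/*
 * Hardware semaphore retry timer -- attempt to acquire the semaphore
 * again.
 */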
static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

/*
 * Timer beat -- ages all active timers by BFA_TIMER_FREQ and invokes
 * the callbacks of those that have expired.
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
	struct list_head *qh = &mod->timer_q;
	struct list_head *qe, *qe_next;
	struct bfa_timer_s *elem;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	qe = bfa_q_next(qh);

	while (qe != qh) {
		qe_next = bfa_q_next(qe);

		elem = (struct bfa_timer_s *) qe;
		if (elem->timeout <= BFA_TIMER_FREQ) {
			elem->timeout = 0;
			list_del(&elem->qe);
			list_add_tail(&elem->qe, &timedout_q);
		} else {
			elem->timeout -= BFA_TIMER_FREQ;
		}

		qe = qe_next;	/* go to next elem */
	}

	/*
	 * Pop all the timeout entries
	 */
	while (!list_empty(&timedout_q)) {
		bfa_q_deq(&timedout_q, &elem);
		elem->timercb(elem->arg);
	}
}

/*
 * Start a timer. Should be called with lock protection.
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		void (*timercb) (void *), void *arg, unsigned int timeout)
{
	WARN_ON(timercb == NULL);
	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timeout = timeout;
	timer->timercb = timercb;
	timer->arg = arg;

	list_add_tail(&timer->qe, &mod->timer_q);
}

/*
 * Stop a timer. Should be called with lock protection.
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	WARN_ON(list_empty(&timer->qe));

	list_del(&timer->qe);
}