bfa_ioc.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667
  1. /*
  2. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include "bfa_ioc.h"
  18. #include "bfi_ctreg.h"
  19. #include "bfa_defs.h"
  20. #include "bfa_defs_svc.h"
  21. #include "bfad_drv.h"
  22. BFA_TRC_FILE(CNA, IOC);
/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	2
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

/* One-shot IOC state-machine timer (BFA_IOC_TOV). */
#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

/* Periodic heartbeat timer (BFA_IOC_HB_TOV). */
#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

/* Offset of per-function f/w trace buffer within the trace region. */
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */
#define bfa_ioc_firmware_lock(__ioc)			\
	((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
	((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)			\
	((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))

#ifdef BFA_IOC_IS_UEFI
#define bfa_ioc_is_bios_optrom(__ioc)	(0)
#define bfa_ioc_is_uefi(__ioc)		BFA_IOC_IS_UEFI
#else
/* BIOS option-ROM builds carry a reduced-size f/w image. */
#define bfa_ioc_is_bios_optrom(__ioc)	\
	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
#define bfa_ioc_is_uefi(__ioc)		(0)
#endif

/* True while a mailbox command is queued or still in the h/w mailbox. */
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
	readl((__ioc)->ioc_regs.hfn_mbox_cmd))
/* Module-wide knob: attempt automatic IOC recovery after a failure. */
bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request */
	IOC_E_ENABLE		= 2,	/* IOC enable request */
	IOC_E_DISABLE		= 3,	/* IOC disable request */
	IOC_E_DETACH		= 4,	/* driver detach cleanup */
	IOC_E_ENABLED		= 5,	/* f/w enabled */
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response */
	IOC_E_DISABLED		= 7,	/* f/w disabled */
	IOC_E_FAILED		= 8,	/* failure notice by iocpf sm */
	IOC_E_HBFAIL		= 9,	/* heartbeat failure */
	IOC_E_HWERROR		= 10,	/* hardware error interrupt */
	IOC_E_TIMEOUT		= 11,	/* timeout */
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

/* Map each IOC sm state handler to its externally visible state code. */
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/*
 * IOCPF state machine definitions/declarations
 */

/* One-shot IOCPF timers: normal timeout vs. shorter recovery timeout. */
#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_recovery_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)

/* Timer used to poll for the h/w semaphore. */
#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request */
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request */
	IOCPF_E_STOP		= 3,	/* stop on driver detach */
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done */
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response */
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response */
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm */
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm */
	IOCPF_E_GETATTRFAIL	= 9,	/* getattr fail notice by ioc sm */
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked */
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout */
};
/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF initialization failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

/*
 * Map each IOCPF sm state handler to its visible state code. Note that
 * fwcheck/mismatch both report FWMISMATCH, and enabling reports HWINIT.
 */
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
  189. /*
  190. * IOC State Machine
  191. */
  192. /*
  193. * Beginning state. IOC uninit state.
  194. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
	/* Intentionally empty: uninit is a pure resting state. */
}
  199. /*
  200. * IOC is in uninit state.
  201. */
  202. static void
  203. bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
  204. {
  205. bfa_trc(ioc, event);
  206. switch (event) {
  207. case IOC_E_RESET:
  208. bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
  209. break;
  210. default:
  211. bfa_sm_fault(ioc, event);
  212. }
  213. }
  214. /*
  215. * Reset entry actions -- initialize state machine
  216. */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	/* Drive the IOCPF sub-state machine back to its reset state too. */
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
  222. /*
  223. * IOC is in reset state.
  224. */
  225. static void
  226. bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
  227. {
  228. bfa_trc(ioc, event);
  229. switch (event) {
  230. case IOC_E_ENABLE:
  231. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  232. break;
  233. case IOC_E_DISABLE:
  234. bfa_ioc_disable_comp(ioc);
  235. break;
  236. case IOC_E_DETACH:
  237. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  238. break;
  239. default:
  240. bfa_sm_fault(ioc, event);
  241. }
  242. }
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	/* Kick off hardware init/enable via the IOCPF state machine. */
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
  248. /*
  249. * Host IOC function is being enabled, awaiting response from firmware.
  250. * Semaphore is acquired.
  251. */
  252. static void
  253. bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
  254. {
  255. bfa_trc(ioc, event);
  256. switch (event) {
  257. case IOC_E_ENABLED:
  258. bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
  259. break;
  260. case IOC_E_FAILED:
  261. bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
  262. break;
  263. case IOC_E_HWERROR:
  264. bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
  265. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
  266. break;
  267. case IOC_E_DISABLE:
  268. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  269. break;
  270. case IOC_E_DETACH:
  271. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  272. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  273. break;
  274. case IOC_E_ENABLE:
  275. break;
  276. default:
  277. bfa_sm_fault(ioc, event);
  278. }
  279. }
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	/* Query IOC attributes from firmware; guard with the IOC timer. */
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}
  286. /*
  287. * IOC configuration in progress. Timer is active.
  288. */
/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		/* Attributes received: validate WWNs and go operational. */
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_FAILED:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_HWERROR:
		/* Stop timer first, then share the TIMEOUT failure path. */
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		/* Duplicate enable request: ignore. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/* Report successful enable to the driver, then start heartbeats. */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
}
/*
 * IOC is operational: heartbeat monitoring is running.
 */
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* Already enabled: ignore. */
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_FAILED:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		break;

	case IOC_E_HWERROR:
		/* Stop heartbeats, then share the HBFAIL failure path. */
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/* Ask the IOCPF sm to disable the function, and log it. */
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
}
  361. /*
  362. * IOC is being disabled
  363. */
  364. static void
  365. bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
  366. {
  367. bfa_trc(ioc, event);
  368. switch (event) {
  369. case IOC_E_DISABLED:
  370. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
  371. break;
  372. case IOC_E_HWERROR:
  373. /*
  374. * No state change. Will move to disabled state
  375. * after iocpf sm completes failure processing and
  376. * moves to disabled state.
  377. */
  378. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
  379. break;
  380. default:
  381. bfa_sm_fault(ioc, event);
  382. }
  383. }
  384. /*
  385. * IOC disable completion entry.
  386. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	/* Notify disable completion to registered callbacks. */
	bfa_ioc_disable_comp(ioc);
}
  392. static void
  393. bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
  394. {
  395. bfa_trc(ioc, event);
  396. switch (event) {
  397. case IOC_E_ENABLE:
  398. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  399. break;
  400. case IOC_E_DISABLE:
  401. ioc->cbfn->disable_cbfn(ioc->bfa);
  402. break;
  403. case IOC_E_DETACH:
  404. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  405. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  406. break;
  407. default:
  408. bfa_sm_fault(ioc, event);
  409. }
  410. }
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	/* Report enable failure to the driver. */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
  416. /*
  417. * Hardware initialization failed.
  418. */
  419. static void
  420. bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
  421. {
  422. bfa_trc(ioc, event);
  423. switch (event) {
  424. case IOC_E_ENABLED:
  425. bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
  426. break;
  427. case IOC_E_FAILED:
  428. /*
  429. * Initialization failure during iocpf init retry.
  430. */
  431. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  432. break;
  433. case IOC_E_DISABLE:
  434. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  435. break;
  436. case IOC_E_DETACH:
  437. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  438. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  439. break;
  440. default:
  441. bfa_sm_fault(ioc, event);
  442. }
  443. }
  444. static void
  445. bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
  446. {
  447. struct list_head *qe;
  448. struct bfa_ioc_hbfail_notify_s *notify;
  449. struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
  450. /*
  451. * Notify driver and common modules registered for notification.
  452. */
  453. ioc->cbfn->hbfail_cbfn(ioc->bfa);
  454. list_for_each(qe, &ioc->hb_notify_q) {
  455. notify = (struct bfa_ioc_hbfail_notify_s *) qe;
  456. notify->cbfn(notify->cbarg);
  457. }
  458. BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
  459. "Heart Beat of IOC has failed\n");
  460. }
  461. /*
  462. * IOC failure.
  463. */
/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FAILED:
		/*
		 * Initialization failure during iocpf recovery:
		 * handled the same as an explicit enable request.
		 * !!! Fall through !!!
		 */
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_ENABLED:
		/* Recovery succeeded: re-fetch attributes. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  492. /*
  493. * IOCPF State Machine
  494. */
  495. /*
  496. * Reset entry actions -- initialize state machine
  497. */
  498. static void
  499. bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
  500. {
  501. iocpf->retry_count = 0;
  502. iocpf->auto_recover = bfa_auto_recover;
  503. }
  504. /*
  505. * Beginning state. IOC is in reset state.
  506. */
  507. static void
  508. bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  509. {
  510. struct bfa_ioc_s *ioc = iocpf->ioc;
  511. bfa_trc(ioc, event);
  512. switch (event) {
  513. case IOCPF_E_ENABLE:
  514. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
  515. break;
  516. case IOCPF_E_STOP:
  517. break;
  518. default:
  519. bfa_sm_fault(ioc, event);
  520. }
  521. }
  522. /*
  523. * Semaphore should be acquired for version check.
  524. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	/* Request the h/w semaphore before the f/w version check. */
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
  530. /*
  531. * Awaiting h/w semaphore to continue with version check.
  532. */
  533. static void
  534. bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  535. {
  536. struct bfa_ioc_s *ioc = iocpf->ioc;
  537. bfa_trc(ioc, event);
  538. switch (event) {
  539. case IOCPF_E_SEMLOCKED:
  540. if (bfa_ioc_firmware_lock(ioc)) {
  541. iocpf->retry_count = 0;
  542. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
  543. } else {
  544. writel(1, ioc->ioc_regs.ioc_sem_reg);
  545. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
  546. }
  547. break;
  548. case IOCPF_E_DISABLE:
  549. bfa_sem_timer_stop(ioc);
  550. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  551. bfa_fsm_send_event(ioc, IOC_E_DISABLED);
  552. break;
  553. case IOCPF_E_STOP:
  554. bfa_sem_timer_stop(ioc);
  555. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  556. break;
  557. default:
  558. bfa_sm_fault(ioc, event);
  559. }
  560. }
  561. /*
  562. * Notify enable completion callback.
  563. */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	/* Re-arm the timer to re-check for a version match later. */
	iocpf->retry_count++;
	bfa_iocpf_timer_start(iocpf->ioc);
}
  575. /*
  576. * Awaiting firmware version match.
  577. */
  578. static void
  579. bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  580. {
  581. struct bfa_ioc_s *ioc = iocpf->ioc;
  582. bfa_trc(ioc, event);
  583. switch (event) {
  584. case IOCPF_E_TIMEOUT:
  585. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
  586. break;
  587. case IOCPF_E_DISABLE:
  588. bfa_iocpf_timer_stop(ioc);
  589. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  590. bfa_fsm_send_event(ioc, IOC_E_DISABLED);
  591. break;
  592. case IOCPF_E_STOP:
  593. bfa_iocpf_timer_stop(ioc);
  594. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  595. break;
  596. default:
  597. bfa_sm_fault(ioc, event);
  598. }
  599. }
  600. /*
  601. * Request for semaphore.
  602. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	/* Request the h/w semaphore needed for hardware init. */
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
  608. /*
  609. * Awaiting semaphore for h/w initialzation.
  610. */
  611. static void
  612. bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  613. {
  614. struct bfa_ioc_s *ioc = iocpf->ioc;
  615. bfa_trc(ioc, event);
  616. switch (event) {
  617. case IOCPF_E_SEMLOCKED:
  618. iocpf->retry_count = 0;
  619. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
  620. break;
  621. case IOCPF_E_DISABLE:
  622. bfa_sem_timer_stop(ioc);
  623. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
  624. break;
  625. default:
  626. bfa_sm_fault(ioc, event);
  627. }
  628. }
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	/* Start h/w initialization (non-forced) under a timeout guard. */
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}
  635. /*
  636. * Hardware is being initialized. Interrupts are enabled.
  637. * Holding hardware semaphore lock.
  638. */
/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		/* F/w came up: proceed to send the enable request. */
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_INITFAIL:
		/* Stop timer first, then share the TIMEOUT retry path. */
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		/* Retry a forced h/w init up to BFA_IOC_HWINIT_MAX times. */
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_iocpf_timer_start(ioc);
			bfa_ioc_hwinit(ioc, BFA_TRUE);
			break;
		}

		/* Out of retries: release semaphore and record failure. */
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_FAILED);
		break;

	case IOCPF_E_DISABLE:
		/* Release the semaphore before going disabled. */
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	/* Arm the response watchdog, then send the enable request to firmware. */
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_enable(iocpf->ioc);
}
/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		/* Enabled: release the h/w semaphore and go ready. */
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		/* Retry from h/w init until the budget is exhausted. */
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			break;
		}

		/* Out of retries: drop the semaphore and report failure. */
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		/* Only the timeout path still notifies the IOC state machine. */
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_FAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_FWREADY:
		/* Firmware signalled ready again -- re-issue the enable request. */
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	/* IOCPF is fully up; notify the IOC state machine. */
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		break;

	case IOCPF_E_FWREADY:
		/*
		 * Firmware-ready while already ready is treated as a failure;
		 * the destination state depends on whether the IOC itself was
		 * operational at the time.
		 */
		if (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op))
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		else
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		bfa_fsm_send_event(ioc, IOC_E_FAILED);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	/* Arm the response watchdog, then ask firmware to disable the IOC. */
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}
/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		/* No (timely) response -- force the fw state and move on. */
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		/* Late enable response -- ignored. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	/* Report disable completion to the IOC state machine. */
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		/* Re-enable: start over with semaphore acquisition. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		/* Stopping for good: unlock the firmware and return to reset. */
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	/* Start a timer; on expiry the semwait/hwinit sequence is retried. */
	bfa_iocpf_timer_start(iocpf->ioc);
}
/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_TIMEOUT:
		/* Retry: go back to acquiring the h/w semaphore. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);
	writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);

	/*
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	/* With auto-recovery enabled, a timer later retries initialization. */
	if (iocpf->auto_recover)
		bfa_iocpf_recovery_timer_start(iocpf->ioc);
}
/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		/* The recovery timer only runs when auto-recovery is enabled. */
		if (iocpf->auto_recover)
			bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_TIMEOUT:
		/* Auto-recovery kick: retry from semaphore acquisition. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  886. /*
  887. * BFA IOC private functions
  888. */
  889. static void
  890. bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
  891. {
  892. struct list_head *qe;
  893. struct bfa_ioc_hbfail_notify_s *notify;
  894. ioc->cbfn->disable_cbfn(ioc->bfa);
  895. /*
  896. * Notify common modules registered for notification.
  897. */
  898. list_for_each(qe, &ioc->hb_notify_q) {
  899. notify = (struct bfa_ioc_hbfail_notify_s *) qe;
  900. notify->cbfn(notify->cbarg);
  901. }
  902. }
  903. bfa_boolean_t
  904. bfa_ioc_sem_get(void __iomem *sem_reg)
  905. {
  906. u32 r32;
  907. int cnt = 0;
  908. #define BFA_SEM_SPINCNT 3000
  909. r32 = readl(sem_reg);
  910. while (r32 && (cnt < BFA_SEM_SPINCNT)) {
  911. cnt++;
  912. udelay(2);
  913. r32 = readl(sem_reg);
  914. }
  915. if (r32 == 0)
  916. return BFA_TRUE;
  917. bfa_assert(cnt < BFA_SEM_SPINCNT);
  918. return BFA_FALSE;
  919. }
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32 r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		/* Got it -- notify the IOCPF state machine immediately. */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	/* Busy: retry when the semaphore timer fires. */
	bfa_sem_timer_start(ioc);
}
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	/* Take LMEM out of reset and enable its init engine. */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	/* Init complete: clear the init-enable and done bits. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/*
	 * Take processor out of reset.
	 * Note: only LPU0's reset bit is cleared here.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/*
	 * Put processors in reset.
	 * Both LPU0 and LPU1 are held in reset (contrast bfa_ioc_lpu_start,
	 * which releases only LPU0).
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
 * Get driver and firmware versions.
 *
 * Reads the running firmware image header out of shared memory (page 0)
 * into *fwhdr, word by word.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	/* Select SMEM page 0 via the host page-number register. */
	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
  1011. /*
  1012. * Returns TRUE if same.
  1013. */
  1014. bfa_boolean_t
  1015. bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
  1016. {
  1017. struct bfi_ioc_image_hdr_s *drv_fwhdr;
  1018. int i;
  1019. drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
  1020. bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
  1021. for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
  1022. if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
  1023. bfa_trc(ioc, i);
  1024. bfa_trc(ioc, fwhdr->md5sum[i]);
  1025. bfa_trc(ioc, drv_fwhdr->md5sum[i]);
  1026. return BFA_FALSE;
  1027. }
  1028. }
  1029. bfa_trc(ioc, fwhdr->md5sum[0]);
  1030. return BFA_TRUE;
  1031. }
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/*
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_bios_optrom(ioc))
		return BFA_TRUE;

	/* Compare running fw header against the driver's embedded image. */
	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	/* Boot environment is stored byte-swapped in the header param. */
	if (swab32(fwhdr.param) != boot_env) {
		bfa_trc(ioc, fwhdr.param);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	/* Finally, the md5sums must match. */
	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
  1060. /*
  1061. * Conditionally flush any pending message from firmware at start.
  1062. */
  1063. static void
  1064. bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
  1065. {
  1066. u32 r32;
  1067. r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
  1068. if (r32)
  1069. writel(1, ioc->ioc_regs.lpu_mbox_cmd);
  1070. }
/*
 * Decide how to bring up the IOC based on the current firmware state:
 * boot fresh firmware, wait for another function's in-progress init,
 * or simply re-attach to already-running compatible firmware.
 * @param[in] force  when true, treat fw state as UNINIT (full reboot)
 */
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_BOOT_TYPE_NORMAL;
	boot_env = BFI_BOOT_LOADER_OS;

	/*
	 * Flash based firmware boot BIOS env.
	 */
	if (bfa_ioc_is_bios_optrom(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_BIOS;
	}

	/*
	 * Flash based firmware boot UEFI env.
	 */
	if (bfa_ioc_is_uefi(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_UEFI;
	}

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
}
  1139. static void
  1140. bfa_ioc_timeout(void *ioc_arg)
  1141. {
  1142. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  1143. bfa_trc(ioc, 0);
  1144. bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
  1145. }
/*
 * Write a message into the host->LPU mailbox registers and ring the
 * doorbell.  Unused mailbox words are zeroed.
 * @param[in] len  message length in bytes, at most BFI_IOC_MSGLEN_MAX
 */
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);
	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/* Zero-fill the remainder of the mailbox. */
	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	/* Read back to flush the posted write. */
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
/*
 * Build and send an IOC enable request (with current timestamp) to firmware.
 */
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct bfa_timeval_s tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_os_gettimeofday(&tv);
	/*
	 * NOTE(review): be32_to_cpu() applied to a host-order tv_sec looks
	 * like it is intended as cpu_to_be32 -- confirm the wire format.
	 */
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
/*
 * Build and send an IOC disable request to firmware.
 */
static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
/*
 * Ask firmware to DMA the IOC attributes into the pre-claimed
 * attr_dma buffer (see bfa_ioc_mem_claim()).
 */
static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
  1197. static void
  1198. bfa_ioc_hb_check(void *cbarg)
  1199. {
  1200. struct bfa_ioc_s *ioc = cbarg;
  1201. u32 hb_count;
  1202. hb_count = readl(ioc->ioc_regs.heartbeat);
  1203. if (ioc->hb_count == hb_count) {
  1204. printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
  1205. bfa_ioc_recover(ioc);
  1206. return;
  1207. } else {
  1208. ioc->hb_count = hb_count;
  1209. }
  1210. bfa_ioc_mbox_poll(ioc);
  1211. bfa_hb_timer_start(ioc);
  1212. }
/*
 * Start heartbeat monitoring: snapshot the current counter and arm
 * the heartbeat timer (serviced by bfa_ioc_hb_check()).
 */
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}
/*
 * Initiate a full firmware download.
 *
 * Copies the driver's embedded firmware image into adapter shared
 * memory word by word, handling both image chunking and SMEM page
 * wrap-around, then records the boot type/environment in SMEM.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		/* Fetch the next image chunk when crossing a chunk boundary. */
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page register to page 0. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
		      swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
		      swab32(boot_env));
}
  1271. /*
  1272. * Update BFA configuration from firmware configuration.
  1273. */
  1274. static void
  1275. bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
  1276. {
  1277. struct bfi_ioc_attr_s *attr = ioc->attr;
  1278. attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
  1279. attr->card_type = be32_to_cpu(attr->card_type);
  1280. attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
  1281. bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
  1282. }
  1283. /*
  1284. * Attach time initialization of mbox logic.
  1285. */
  1286. static void
  1287. bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
  1288. {
  1289. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1290. int mc;
  1291. INIT_LIST_HEAD(&mod->cmd_q);
  1292. for (mc = 0; mc < BFI_MC_MAX; mc++) {
  1293. mod->mbhdlr[mc].cbfn = NULL;
  1294. mod->mbhdlr[mc].cbarg = ioc->bfa;
  1295. }
  1296. }
/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;
	u32 stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
  1323. /*
  1324. * Cleanup any pending requests.
  1325. */
  1326. static void
  1327. bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
  1328. {
  1329. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1330. struct bfa_mbox_cmd_s *cmd;
  1331. while (!list_empty(&mod->cmd_q))
  1332. bfa_q_deq(&mod->cmd_q, &cmd);
  1333. }
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);	/* word count */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		/* SMEM words are big-endian; convert to host order. */
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page register to page 0. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page register to page 0. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * Firmware/driver version mismatch: complete the pending enable with a
 * failure status and log a warning.
 */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
}
/*
 * Initialize the ASIC PLL while holding the chip-init semaphore.
 * Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{

	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 * NOTE(review): the bfa_ioc_sem_get() result is ignored here --
	 * init proceeds even if acquisition times out; confirm intended.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;
	/*
	 * release semaphore.
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	/* Discard stale mailbox messages, then load the firmware image. */
	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);

	/*
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
/*
 * Enable/disable IOC failure auto recovery.
 *
 * Sets the module-wide flag consulted when an IOC fails.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}
/*
 * Return TRUE when the IOC state machine is in the operational state.
 */
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
  1504. bfa_boolean_t
  1505. bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
  1506. {
  1507. u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
  1508. return ((r32 != BFI_IOC_UNINIT) &&
  1509. (r32 != BFI_IOC_INITING) &&
  1510. (r32 != BFI_IOC_MEMTEST));
  1511. }
/*
 * Copy an incoming message out of the LPU->host mailbox registers into
 * mbmsg, then acknowledge it to clear the mailbox interrupt.
 */
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int	i;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	/* Read back to flush the posted write. */
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}
/*
 * Dispatch a BFI_MC_IOC class message: heartbeats are counted only as
 * stats, state-machine events go to the IOCPF, attribute replies are
 * decoded.  Unknown message ids assert.
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}
/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	/* Reset both state machines before any events are delivered. */
	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	/* Catapult (CT) ASIC detection from the PCI device id. */
	ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
/*
 * Kick off IOC enable via the IOC state machine.
 */
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	/* Re-arm the one-shot firmware-save-on-failure flag. */
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
/*
 * Kick off IOC disable via the IOC state machine.
 */
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before call bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	/* Trace saving is only useful when auto-recovery is enabled. */
	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
  1652. /*
  1653. * Register mailbox message handler functions
  1654. *
  1655. * @param[in] ioc IOC instance
  1656. * @param[in] mcfuncs message class handler functions
  1657. */
  1658. void
  1659. bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
  1660. {
  1661. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1662. int mc;
  1663. for (mc = 0; mc < BFI_MC_MAX; mc++)
  1664. mod->mbhdlr[mc].cbfn = mcfuncs[mc];
  1665. }
  1666. /*
  1667. * Register mailbox message handler function, to be called by common modules
  1668. */
  1669. void
  1670. bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
  1671. bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
  1672. {
  1673. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1674. mod->mbhdlr[mc].cbfn = cbfn;
  1675. mod->mbhdlr[mc].cbarg = cbarg;
  1676. }
  1677. /*
  1678. * Queue a mailbox command request to firmware. Waits if mailbox is busy.
  1679. * Responsibility of caller to serialize
  1680. *
  1681. * @param[in] ioc IOC instance
  1682. * @param[i] cmd Mailbox command
  1683. */
  1684. void
  1685. bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
  1686. {
  1687. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1688. u32 stat;
  1689. /*
  1690. * If a previous command is pending, queue new command
  1691. */
  1692. if (!list_empty(&mod->cmd_q)) {
  1693. list_add_tail(&cmd->qe, &mod->cmd_q);
  1694. return;
  1695. }
  1696. /*
  1697. * If mailbox is busy, queue command for poll timer
  1698. */
  1699. stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
  1700. if (stat) {
  1701. list_add_tail(&cmd->qe, &mod->cmd_q);
  1702. return;
  1703. }
  1704. /*
  1705. * mailbox is free -- queue command to firmware
  1706. */
  1707. bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
  1708. }
  1709. /*
  1710. * Handle mailbox interrupts
  1711. */
  1712. void
  1713. bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
  1714. {
  1715. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1716. struct bfi_mbmsg_s m;
  1717. int mc;
  1718. bfa_ioc_msgget(ioc, &m);
  1719. /*
  1720. * Treat IOC message class as special.
  1721. */
  1722. mc = m.mh.msg_class;
  1723. if (mc == BFI_MC_IOC) {
  1724. bfa_ioc_isr(ioc, &m);
  1725. return;
  1726. }
  1727. if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
  1728. return;
  1729. mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
  1730. }
  1731. void
  1732. bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
  1733. {
  1734. bfa_fsm_send_event(ioc, IOC_E_HWERROR);
  1735. }
  1736. void
  1737. bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
  1738. {
  1739. ioc->fcmode = BFA_TRUE;
  1740. ioc->port_id = bfa_ioc_pcifn(ioc);
  1741. }
  1742. /*
  1743. * return true if IOC is disabled
  1744. */
  1745. bfa_boolean_t
  1746. bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
  1747. {
  1748. return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
  1749. bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
  1750. }
  1751. /*
  1752. * return true if IOC firmware is different.
  1753. */
  1754. bfa_boolean_t
  1755. bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
  1756. {
  1757. return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
  1758. bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
  1759. bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
  1760. }
/* True when the firmware-reported state means the IOC is not running. */
#define bfa_ioc_state_disabled(__sm) \
        (((__sm) == BFI_IOC_UNINIT) || \
        ((__sm) == BFI_IOC_INITING) || \
        ((__sm) == BFI_IOC_HWINIT) || \
        ((__sm) == BFI_IOC_DISABLED) || \
        ((__sm) == BFI_IOC_FAIL) || \
        ((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
        u32 ioc_state;
        void __iomem *rb = ioc->pcidev.pci_bar_kva;

        /* This IOC's own state machine must already be in "disabled". */
        if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
                return BFA_FALSE;

        /* Firmware-visible state of IOC0 must also report disabled. */
        ioc_state = readl(rb + BFA_IOC0_STATE_REG);
        if (!bfa_ioc_state_disabled(ioc_state))
                return BFA_FALSE;

        /* Single-port 8G adapters have no second IOC to check. */
        if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
                ioc_state = readl(rb + BFA_IOC1_STATE_REG);
                if (!bfa_ioc_state_disabled(ioc_state))
                        return BFA_FALSE;
        }

        return BFA_TRUE;
}
  1789. #define BFA_MFG_NAME "Brocade"
  1790. void
  1791. bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
  1792. struct bfa_adapter_attr_s *ad_attr)
  1793. {
  1794. struct bfi_ioc_attr_s *ioc_attr;
  1795. ioc_attr = ioc->attr;
  1796. bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
  1797. bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
  1798. bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
  1799. bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
  1800. memcpy(&ad_attr->vpd, &ioc_attr->vpd,
  1801. sizeof(struct bfa_mfg_vpd_s));
  1802. ad_attr->nports = bfa_ioc_get_nports(ioc);
  1803. ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
  1804. bfa_ioc_get_adapter_model(ioc, ad_attr->model);
  1805. /* For now, model descr uses same model string */
  1806. bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
  1807. ad_attr->card_type = ioc_attr->card_type;
  1808. ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
  1809. if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
  1810. ad_attr->prototype = 1;
  1811. else
  1812. ad_attr->prototype = 0;
  1813. ad_attr->pwwn = ioc->attr->pwwn;
  1814. ad_attr->mac = bfa_ioc_get_mac(ioc);
  1815. ad_attr->pcie_gen = ioc_attr->pcie_gen;
  1816. ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
  1817. ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
  1818. ad_attr->asic_rev = ioc_attr->asic_rev;
  1819. bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
  1820. ad_attr->cna_capable = ioc->cna;
  1821. ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
  1822. }
  1823. enum bfa_ioc_type_e
  1824. bfa_ioc_get_type(struct bfa_ioc_s *ioc)
  1825. {
  1826. if (!ioc->ctdev || ioc->fcmode)
  1827. return BFA_IOC_TYPE_FC;
  1828. else if (ioc->ioc_mc == BFI_MC_IOCFC)
  1829. return BFA_IOC_TYPE_FCoE;
  1830. else if (ioc->ioc_mc == BFI_MC_LL)
  1831. return BFA_IOC_TYPE_LL;
  1832. else {
  1833. bfa_assert(ioc->ioc_mc == BFI_MC_LL);
  1834. return BFA_IOC_TYPE_LL;
  1835. }
  1836. }
  1837. void
  1838. bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
  1839. {
  1840. memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
  1841. memcpy((void *)serial_num,
  1842. (void *)ioc->attr->brcd_serialnum,
  1843. BFA_ADAPTER_SERIAL_NUM_LEN);
  1844. }
  1845. void
  1846. bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
  1847. {
  1848. memset((void *)fw_ver, 0, BFA_VERSION_LEN);
  1849. memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
  1850. }
  1851. void
  1852. bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
  1853. {
  1854. bfa_assert(chip_rev);
  1855. memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
  1856. chip_rev[0] = 'R';
  1857. chip_rev[1] = 'e';
  1858. chip_rev[2] = 'v';
  1859. chip_rev[3] = '-';
  1860. chip_rev[4] = ioc->attr->asic_rev;
  1861. chip_rev[5] = '\0';
  1862. }
  1863. void
  1864. bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
  1865. {
  1866. memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
  1867. memcpy(optrom_ver, ioc->attr->optrom_version,
  1868. BFA_VERSION_LEN);
  1869. }
  1870. void
  1871. bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
  1872. {
  1873. memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
  1874. memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
  1875. }
  1876. void
  1877. bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
  1878. {
  1879. struct bfi_ioc_attr_s *ioc_attr;
  1880. bfa_assert(model);
  1881. memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
  1882. ioc_attr = ioc->attr;
  1883. /*
  1884. * model name
  1885. */
  1886. snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
  1887. BFA_MFG_NAME, ioc_attr->card_type);
  1888. }
  1889. enum bfa_ioc_state
  1890. bfa_ioc_get_state(struct bfa_ioc_s *ioc)
  1891. {
  1892. enum bfa_iocpf_state iocpf_st;
  1893. enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
  1894. if (ioc_st == BFA_IOC_ENABLING ||
  1895. ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
  1896. iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
  1897. switch (iocpf_st) {
  1898. case BFA_IOCPF_SEMWAIT:
  1899. ioc_st = BFA_IOC_SEMWAIT;
  1900. break;
  1901. case BFA_IOCPF_HWINIT:
  1902. ioc_st = BFA_IOC_HWINIT;
  1903. break;
  1904. case BFA_IOCPF_FWMISMATCH:
  1905. ioc_st = BFA_IOC_FWMISMATCH;
  1906. break;
  1907. case BFA_IOCPF_FAIL:
  1908. ioc_st = BFA_IOC_FAIL;
  1909. break;
  1910. case BFA_IOCPF_INITFAIL:
  1911. ioc_st = BFA_IOC_INITFAIL;
  1912. break;
  1913. default:
  1914. break;
  1915. }
  1916. }
  1917. return ioc_st;
  1918. }
  1919. void
  1920. bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
  1921. {
  1922. memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
  1923. ioc_attr->state = bfa_ioc_get_state(ioc);
  1924. ioc_attr->port_id = ioc->port_id;
  1925. ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
  1926. bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
  1927. ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
  1928. ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
  1929. bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
  1930. }
  1931. mac_t
  1932. bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
  1933. {
  1934. /*
  1935. * Check the IOC type and return the appropriate MAC
  1936. */
  1937. if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
  1938. return ioc->attr->fcoe_mac;
  1939. else
  1940. return ioc->attr->mac;
  1941. }
  1942. mac_t
  1943. bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
  1944. {
  1945. mac_t m;
  1946. m = ioc->attr->mfg_mac;
  1947. if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
  1948. m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
  1949. else
  1950. bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
  1951. bfa_ioc_pcifn(ioc));
  1952. return m;
  1953. }
  1954. bfa_boolean_t
  1955. bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
  1956. {
  1957. return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
  1958. }
  1959. /*
  1960. * Retrieve saved firmware trace from a prior IOC failure.
  1961. */
  1962. bfa_status_t
  1963. bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
  1964. {
  1965. int tlen;
  1966. if (ioc->dbg_fwsave_len == 0)
  1967. return BFA_STATUS_ENOFSAVE;
  1968. tlen = *trclen;
  1969. if (tlen > ioc->dbg_fwsave_len)
  1970. tlen = ioc->dbg_fwsave_len;
  1971. memcpy(trcdata, ioc->dbg_fwsave, tlen);
  1972. *trclen = tlen;
  1973. return BFA_STATUS_OK;
  1974. }
  1975. /*
  1976. * Retrieve saved firmware trace from a prior IOC failure.
  1977. */
  1978. bfa_status_t
  1979. bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
  1980. {
  1981. u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
  1982. int tlen;
  1983. bfa_status_t status;
  1984. bfa_trc(ioc, *trclen);
  1985. tlen = *trclen;
  1986. if (tlen > BFA_DBG_FWTRC_LEN)
  1987. tlen = BFA_DBG_FWTRC_LEN;
  1988. status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
  1989. *trclen = tlen;
  1990. return status;
  1991. }
  1992. static void
  1993. bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
  1994. {
  1995. struct bfa_mbox_cmd_s cmd;
  1996. struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
  1997. bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
  1998. bfa_ioc_portid(ioc));
  1999. req->ioc_class = ioc->ioc_mc;
  2000. bfa_ioc_mbox_queue(ioc, &cmd);
  2001. }
/*
 * Send a fw_sync command and give firmware a bounded window to pick it
 * up before proceeding.
 */
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
        u32 fwsync_iter = 1000;

        bfa_ioc_send_fwsync(ioc);

        /*
         * After sending a fw sync mbox command wait for it to
         * take effect.  We will not wait for a response because
         *    1. fw_sync mbox cmd doesn't have a response.
         *    2. Even if we implement that,  interrupts might not
         *       be enabled when we call this function.
         * So, just keep checking if any mbox cmd is pending, and
         * after waiting for a reasonable amount of time, go ahead.
         * It is possible that fw has crashed and the mbox command
         * is never acknowledged.
         *
         * NOTE(review): this is a bounded busy-spin with no delay per
         * iteration -- presumably 1000 polls is "reasonable" here;
         * confirm the intended wait duration.
         */
        while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
                fwsync_iter--;
}
/*
 * Dump firmware smem
 *
 * Reads one chunk per call: *offset is the read cursor (advanced across
 * calls, wrapped to 0 after the final chunk) and *buflen is updated to
 * the number of bytes actually read.
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
                u32 *offset, int *buflen)
{
        u32 loff;
        int dlen;
        bfa_status_t status;
        u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

        /* Reject a cursor already past the end of shared memory. */
        if (*offset >= smem_len) {
                *offset = *buflen = 0;
                return BFA_STATUS_EINVAL;
        }

        loff = *offset;
        dlen = *buflen;

        /*
         * First smem read, sync smem before proceeding
         * No need to sync before reading every chunk.
         */
        if (loff == 0)
                bfa_ioc_fwsync(ioc);

        /* Clamp the final chunk so it ends exactly at smem_len. */
        if ((loff + dlen) >= smem_len)
                dlen = smem_len - loff;

        status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
        if (status != BFA_STATUS_OK) {
                *offset = *buflen = 0;
                return status;
        }

        /* Advance the cursor; wrap to 0 once the full dump is done. */
        *offset += dlen;
        if (*offset >= smem_len)
                *offset = 0;
        *buflen = dlen;

        return status;
}
  2057. /*
  2058. * Firmware statistics
  2059. */
  2060. bfa_status_t
  2061. bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
  2062. {
  2063. u32 loff = BFI_IOC_FWSTATS_OFF + \
  2064. BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
  2065. int tlen;
  2066. bfa_status_t status;
  2067. if (ioc->stats_busy) {
  2068. bfa_trc(ioc, ioc->stats_busy);
  2069. return BFA_STATUS_DEVBUSY;
  2070. }
  2071. ioc->stats_busy = BFA_TRUE;
  2072. tlen = sizeof(struct bfa_fw_stats_s);
  2073. status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
  2074. ioc->stats_busy = BFA_FALSE;
  2075. return status;
  2076. }
  2077. bfa_status_t
  2078. bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
  2079. {
  2080. u32 loff = BFI_IOC_FWSTATS_OFF + \
  2081. BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
  2082. int tlen;
  2083. bfa_status_t status;
  2084. if (ioc->stats_busy) {
  2085. bfa_trc(ioc, ioc->stats_busy);
  2086. return BFA_STATUS_DEVBUSY;
  2087. }
  2088. ioc->stats_busy = BFA_TRUE;
  2089. tlen = sizeof(struct bfa_fw_stats_s);
  2090. status = bfa_ioc_smem_clr(ioc, loff, tlen);
  2091. ioc->stats_busy = BFA_FALSE;
  2092. return status;
  2093. }
  2094. /*
  2095. * Save firmware trace if configured.
  2096. */
  2097. static void
  2098. bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
  2099. {
  2100. int tlen;
  2101. if (ioc->dbg_fwsave_len) {
  2102. tlen = ioc->dbg_fwsave_len;
  2103. bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
  2104. }
  2105. }
  2106. /*
  2107. * Firmware failure detected. Start recovery actions.
  2108. */
  2109. static void
  2110. bfa_ioc_recover(struct bfa_ioc_s *ioc)
  2111. {
  2112. if (ioc->dbg_fwsave_once) {
  2113. ioc->dbg_fwsave_once = BFA_FALSE;
  2114. bfa_ioc_debug_save(ioc);
  2115. }
  2116. bfa_ioc_stats(ioc, ioc_hbfails);
  2117. bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
  2118. }
/*
 * Validate WWNs in the firmware attribute block.
 *
 * NOTE(review): the body only filters out LL (Ethernet) functions and
 * then does nothing -- effectively a no-op placeholder; any actual WWN
 * checks appear to have been removed or not yet added.
 */
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
        if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
                return;
}
  2125. /*
  2126. * BFA IOC PF private functions
  2127. */
  2128. static void
  2129. bfa_iocpf_timeout(void *ioc_arg)
  2130. {
  2131. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  2132. bfa_trc(ioc, 0);
  2133. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
  2134. }
  2135. static void
  2136. bfa_iocpf_sem_timeout(void *ioc_arg)
  2137. {
  2138. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  2139. bfa_ioc_hw_sem_get(ioc);
  2140. }
/*
 * bfa timer function
 *
 * Called once per BFA_TIMER_FREQ tick: ages every armed timer and runs
 * the callbacks of those that expired on this beat.
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
        struct list_head *qh = &mod->timer_q;
        struct list_head *qe, *qe_next;
        struct bfa_timer_s *elem;
        struct list_head timedout_q;

        INIT_LIST_HEAD(&timedout_q);

        /*
         * Walk the active list, decrementing each timer by one beat and
         * moving expired ones onto a private list; caching qe_next keeps
         * the walk safe across the list_del().
         */
        qe = bfa_q_next(qh);
        while (qe != qh) {
                qe_next = bfa_q_next(qe);

                elem = (struct bfa_timer_s *) qe;
                if (elem->timeout <= BFA_TIMER_FREQ) {
                        elem->timeout = 0;
                        list_del(&elem->qe);
                        list_add_tail(&elem->qe, &timedout_q);
                } else {
                        elem->timeout -= BFA_TIMER_FREQ;
                }

                qe = qe_next;   /* go to next elem */
        }

        /*
         * Pop all the timeout entries -- callbacks run only after the
         * walk, so a callback that re-arms a timer cannot disturb the
         * traversal above.
         */
        while (!list_empty(&timedout_q)) {
                bfa_q_deq(&timedout_q, &elem);
                elem->timercb(elem->arg);
        }
}
  2173. /*
  2174. * Should be called with lock protection
  2175. */
  2176. void
  2177. bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
  2178. void (*timercb) (void *), void *arg, unsigned int timeout)
  2179. {
  2180. bfa_assert(timercb != NULL);
  2181. bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
  2182. timer->timeout = timeout;
  2183. timer->timercb = timercb;
  2184. timer->arg = arg;
  2185. list_add_tail(&timer->qe, &mod->timer_q);
  2186. }
  2187. /*
  2188. * Should be called with lock protection
  2189. */
  2190. void
  2191. bfa_timer_stop(struct bfa_timer_s *timer)
  2192. {
  2193. bfa_assert(!list_empty(&timer->qe));
  2194. list_del(&timer->qe);
  2195. }