bfa_ioc.c 66 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991
  1. /*
  2. * Linux network driver for Brocade Converged Network Adapter.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License (GPL) Version 2 as
  6. * published by the Free Software Foundation
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. /*
  14. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  15. * All rights reserved
  16. * www.brocade.com
  17. */
  18. #include "bfa_ioc.h"
  19. #include "bfi_reg.h"
  20. #include "bfa_defs.h"
/**
 * IOC local definitions
 */

/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 *
 * Each wrapper dispatches through the per-ASIC hardware interface table
 * (ioc_hwif) attached to the IOC, keeping the generic state machines
 * below chip-independent.
 */
#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

/* A mailbox command is pending when the driver-side command queue is
 * non-empty, or the h/w mailbox command register still holds a command
 * the firmware has not consumed.
 */
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

/* Module-wide default: attempt automatic IOC recovery after a failure. */
static bool bfa_nw_auto_recover = true;
/*
 * forward declarations
 */
/* h/w semaphore and hardware init helpers */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
/* firmware mailbox requests */
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
/* heartbeat monitoring */
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
/* notifications from the iocpf state machine into the ioc state machine */
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
			u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
/* adapter attribute extraction helpers */
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
			char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
			char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
			char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
			char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
			char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
/**
 * IOC state machine definitions/declarations
 */

/* Events consumed by the IOC state machine. */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
};

/* One state-handler/entry-function pair per IOC state. */
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

/* Maps each IOC state handler to its externally reported state value. */
static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
/*
 * Forward declareations for iocpf state machine
 */
/* Requests from the ioc state machine into the iocpf state machine. */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/**
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
};

/**
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from drivers */
};

/* One state-handler/entry-function pair per IOCPF state. */
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

/* Maps each IOCPF state handler to its externally reported state value. */
static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/**
 * IOC State Machine
 */

/**
 * Beginning state. IOC uninit state.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
	/* No entry actions for the uninit state. */
}
  212. /**
  213. * IOC is in uninit state.
  214. */
  215. static void
  216. bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
  217. {
  218. switch (event) {
  219. case IOC_E_RESET:
  220. bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
  221. break;
  222. default:
  223. bfa_sm_fault(event);
  224. }
  225. }
/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	/* Drive the companion IOCPF state machine back to its reset state. */
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
  234. /**
  235. * IOC is in reset state.
  236. */
  237. static void
  238. bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
  239. {
  240. switch (event) {
  241. case IOC_E_ENABLE:
  242. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  243. break;
  244. case IOC_E_DISABLE:
  245. bfa_ioc_disable_comp(ioc);
  246. break;
  247. case IOC_E_DETACH:
  248. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  249. break;
  250. default:
  251. bfa_sm_fault(event);
  252. }
  253. }
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	/* Kick the IOCPF state machine to start the enable sequence. */
	bfa_iocpf_enable(ioc);
}
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		/* On PFFAILED the iocpf sm already knows about the failure;
		 * only push an init-failure down for a raw h/w error.
		 */
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		/* Duplicate enable request: already enabling, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * Entry to getattr state: arm a response timer and request the IOC
 * attributes from firmware.
 */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	/* Guard the getattr request with the IOC timeout. */
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}
/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/* Failure arrived before the timer fired; stop it, then
		 * share the timeout handling below.
		 */
		del_timer(&ioc->ioc_timer);
		/* fall through */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		/* The iocpf sm already knows about a PFFAILED event. */
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		/* Duplicate enable request: ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry to operational state: report success to the driver, notify
 * registered listeners, and start heartbeat monitoring of the firmware.
 */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
}
/* IOC is operational: handle disable requests and the various failure
 * notifications (PF failure, h/w error, heartbeat loss).
 */
static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		/* Already enabled: ignore duplicate request. */
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		/* With auto-recovery, retry initialization; otherwise park
		 * in the failed state.
		 */
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);
		/* The iocpf sm already knows about a PFFAILED event. */
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	/* Ask the IOCPF state machine to run the disable sequence. */
	bfa_iocpf_disable(ioc);
}
/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_iocpf_fail(ioc);
		break;

	case IOC_E_HWFAILED:
		/* PCI mapping loss: go straight to hwfail and complete
		 * the pending disable.
		 */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	/* Signal disable completion to the driver. */
	bfa_ioc_disable_comp(ioc);
}
  409. static void
  410. bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
  411. {
  412. switch (event) {
  413. case IOC_E_ENABLE:
  414. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  415. break;
  416. case IOC_E_DISABLE:
  417. ioc->cbfn->disable_cbfn(ioc->bfa);
  418. break;
  419. case IOC_E_DETACH:
  420. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  421. bfa_iocpf_stop(ioc);
  422. break;
  423. default:
  424. bfa_sm_fault(event);
  425. }
  426. }
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
	/* No entry actions; the iocpf sm drives the retry. */
}
/**
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/**
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		/* The iocpf sm already knows about a PFFAILED event. */
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		/* Retry already in progress: ignore. */
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
	/* No entry actions; failure was already reported before entering. */
}
  472. /**
  473. * IOC failure.
  474. */
  475. static void
  476. bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
  477. {
  478. switch (event) {
  479. case IOC_E_ENABLE:
  480. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  481. break;
  482. case IOC_E_DISABLE:
  483. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  484. break;
  485. case IOC_E_DETACH:
  486. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  487. bfa_iocpf_stop(ioc);
  488. break;
  489. case IOC_E_HWERROR:
  490. /* HB failure notification, ignore. */
  491. break;
  492. default:
  493. bfa_sm_fault(event);
  494. }
  495. }
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
	/* No entry actions for the h/w failure state. */
}
  500. /**
  501. * IOC failure.
  502. */
  503. static void
  504. bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
  505. {
  506. switch (event) {
  507. case IOC_E_ENABLE:
  508. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  509. break;
  510. case IOC_E_DISABLE:
  511. ioc->cbfn->disable_cbfn(ioc->bfa);
  512. break;
  513. case IOC_E_DETACH:
  514. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  515. break;
  516. default:
  517. bfa_sm_fault(event);
  518. }
  519. }
/**
 * IOCPF State Machine
 */

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	/* Re-arm the one-shot fwmismatch notification and latch the
	 * module-wide auto-recovery policy for this enable cycle.
	 */
	iocpf->fw_mismatch_notified = false;
	iocpf->auto_recover = bfa_nw_auto_recover;
}
  532. /**
  533. * Beginning state. IOC is in reset state.
  534. */
  535. static void
  536. bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
  537. {
  538. switch (event) {
  539. case IOCPF_E_ENABLE:
  540. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
  541. break;
  542. case IOCPF_E_STOP:
  543. break;
  544. default:
  545. bfa_sm_fault(event);
  546. }
  547. }
/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	/* Initialize, then start acquiring the h/w semaphore; the sm gets
	 * IOCPF_E_SEMLOCKED (or IOCPF_E_SEM_ERROR) when done.
	 */
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				/* Other functions still syncing: back off,
				 * drop both locks and retry after the
				 * semaphore timeout.
				 */
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			/* Running firmware doesn't match this driver. */
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
  598. /**
  599. * Notify enable completion callback
  600. */
  601. static void
  602. bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
  603. {
  604. /* Call only the first time sm enters fwmismatch state. */
  605. if (!iocpf->fw_mismatch_notified)
  606. bfa_ioc_pf_fwmismatch(iocpf->ioc);
  607. iocpf->fw_mismatch_notified = true;
  608. mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
  609. msecs_to_jiffies(BFA_IOC_TOV));
  610. }
  611. /**
  612. * Awaiting firmware version match.
  613. */
  614. static void
  615. bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
  616. {
  617. struct bfa_ioc *ioc = iocpf->ioc;
  618. switch (event) {
  619. case IOCPF_E_TIMEOUT:
  620. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
  621. break;
  622. case IOCPF_E_DISABLE:
  623. del_timer(&ioc->iocpf_timer);
  624. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  625. bfa_ioc_pf_disabled(ioc);
  626. break;
  627. case IOCPF_E_STOP:
  628. del_timer(&ioc->iocpf_timer);
  629. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  630. break;
  631. default:
  632. bfa_sm_fault(event);
  633. }
  634. }
  635. /**
  636. * Request for semaphore.
  637. */
  638. static void
  639. bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
  640. {
  641. bfa_ioc_hw_sem_get(iocpf->ioc);
  642. }
/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			/* All functions synced: join and init the h/w. */
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			/* Sync not complete: release the semaphore and
			 * retry after the semaphore timeout.
			 */
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_SEM_ERROR:
		/* Semaphore register unreadable: hard failure. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}
  673. static void
  674. bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
  675. {
  676. iocpf->poll_time = 0;
  677. bfa_ioc_reset(iocpf->ioc, false);
  678. }
  679. /**
  680. * Hardware is being initialized. Interrupts are enabled.
  681. * Holding hardware semaphore lock.
  682. */
  683. static void
  684. bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
  685. {
  686. struct bfa_ioc *ioc = iocpf->ioc;
  687. switch (event) {
  688. case IOCPF_E_FWREADY:
  689. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
  690. break;
  691. case IOCPF_E_TIMEOUT:
  692. bfa_nw_ioc_hw_sem_release(ioc);
  693. bfa_ioc_pf_failed(ioc);
  694. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
  695. break;
  696. case IOCPF_E_DISABLE:
  697. del_timer(&ioc->iocpf_timer);
  698. bfa_ioc_sync_leave(ioc);
  699. bfa_nw_ioc_hw_sem_release(ioc);
  700. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
  701. break;
  702. default:
  703. bfa_sm_fault(event);
  704. }
  705. }
  706. static void
  707. bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
  708. {
  709. mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
  710. msecs_to_jiffies(BFA_IOC_TOV));
  711. /**
  712. * Enable Interrupts before sending fw IOC ENABLE cmd.
  713. */
  714. iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
  715. bfa_ioc_send_enable(iocpf->ioc);
  716. }
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		/* On INITFAIL the timer was deleted above; on TIMEOUT it
		 * has already fired, and only then is pf_failed reported.
		 */
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	/* Report IOCPF-enable completion up to the IOC state machine. */
	bfa_ioc_pf_enabled(iocpf->ioc);
}
  756. static void
  757. bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
  758. {
  759. switch (event) {
  760. case IOCPF_E_DISABLE:
  761. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
  762. break;
  763. case IOCPF_E_GETATTRFAIL:
  764. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
  765. break;
  766. case IOCPF_E_FAIL:
  767. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
  768. break;
  769. default:
  770. bfa_sm_fault(event);
  771. }
  772. }
  773. static void
  774. bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
  775. {
  776. mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
  777. msecs_to_jiffies(BFA_IOC_TOV));
  778. bfa_ioc_send_disable(iocpf->ioc);
  779. }
/**
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		/* No reply from fw: force its published state to FAIL and
		 * proceed with the disable sync anyway.
		 */
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		/* Late ENABLE response while disabling: ignore it. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	/* Grab the h/w semaphore to synchronize the disable. */
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
  812. /**
  813. * IOC hb ack request is being removed.
  814. */
  815. static void
  816. bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
  817. {
  818. struct bfa_ioc *ioc = iocpf->ioc;
  819. switch (event) {
  820. case IOCPF_E_SEMLOCKED:
  821. bfa_ioc_sync_leave(ioc);
  822. bfa_nw_ioc_hw_sem_release(ioc);
  823. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
  824. break;
  825. case IOCPF_E_SEM_ERROR:
  826. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
  827. bfa_ioc_pf_hwfailed(ioc);
  828. break;
  829. case IOCPF_E_FAIL:
  830. break;
  831. default:
  832. bfa_sm_fault(event);
  833. }
  834. }
  835. /**
  836. * IOC disable completion entry.
  837. */
  838. static void
  839. bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
  840. {
  841. bfa_ioc_mbox_flush(iocpf->ioc);
  842. bfa_ioc_pf_disabled(iocpf->ioc);
  843. }
  844. static void
  845. bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
  846. {
  847. struct bfa_ioc *ioc = iocpf->ioc;
  848. switch (event) {
  849. case IOCPF_E_ENABLE:
  850. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
  851. break;
  852. case IOCPF_E_STOP:
  853. bfa_ioc_firmware_unlock(ioc);
  854. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  855. break;
  856. default:
  857. bfa_sm_fault(event);
  858. }
  859. }
  860. static void
  861. bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
  862. {
  863. bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
  864. bfa_ioc_hw_sem_get(iocpf->ioc);
  865. }
/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		/* Under the semaphore: notify, leave the sync group,
		 * publish FAIL, and only then release the semaphore.
		 */
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		/* Already handling a failure; ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
	/* Intentionally empty: cleanup happened before entering here. */
}
  904. /**
  905. * Hardware initialization failed.
  906. */
  907. static void
  908. bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
  909. {
  910. struct bfa_ioc *ioc = iocpf->ioc;
  911. switch (event) {
  912. case IOCPF_E_DISABLE:
  913. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
  914. break;
  915. case IOCPF_E_STOP:
  916. bfa_ioc_firmware_unlock(ioc);
  917. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  918. break;
  919. default:
  920. bfa_sm_fault(event);
  921. }
  922. }
  923. static void
  924. bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
  925. {
  926. /**
  927. * Mark IOC as failed in hardware and stop firmware.
  928. */
  929. bfa_ioc_lpu_stop(iocpf->ioc);
  930. /**
  931. * Flush any queued up mailbox requests.
  932. */
  933. bfa_ioc_mbox_flush(iocpf->ioc);
  934. bfa_ioc_hw_sem_get(iocpf->ioc);
  935. }
/**
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			/* No auto recovery: publish FAIL and stay failed. */
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			/* Auto recovery: re-init right away (keeping the
			 * semaphore) if all functions acked the failure,
			 * otherwise release it and wait in semwait.
			 */
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		/* Already failed; ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
	/* Intentionally empty: failure was reported before entry. */
}
  979. /**
  980. * @brief
  981. * IOC is in failed state.
  982. */
  983. static void
  984. bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
  985. {
  986. switch (event) {
  987. case IOCPF_E_DISABLE:
  988. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
  989. break;
  990. default:
  991. bfa_sm_fault(event);
  992. }
  993. }
/**
 * BFA IOC private functions
 */

/**
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;
	struct list_head *qe;

	list_for_each(qe, &ioc->notify_q) {
		/* NOTE(review): the cast assumes the list linkage is the
		 * first member of struct bfa_ioc_notify - confirm in the
		 * header before relying on it.
		 */
		notify = (struct bfa_ioc_notify *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}
static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	/* Complete the driver's disable request, then notify all
	 * registered modules that the IOC is down.
	 */
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
  1016. bool
  1017. bfa_nw_ioc_sem_get(void __iomem *sem_reg)
  1018. {
  1019. u32 r32;
  1020. int cnt = 0;
  1021. #define BFA_SEM_SPINCNT 3000
  1022. r32 = readl(sem_reg);
  1023. while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
  1024. cnt++;
  1025. udelay(2);
  1026. r32 = readl(sem_reg);
  1027. }
  1028. if (!(r32 & 1))
  1029. return true;
  1030. return false;
  1031. }
void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	/* NOTE(review): the read appears to settle/claim the semaphore
	 * before the write of 1 releases it - same read-then-write
	 * pattern as bfa_ioc_hw_sem_init(); confirm against the ASIC
	 * semaphore documentation.
	 */
	readl(sem_reg);
	writel(1, sem_reg);
}
  1038. /* Clear fwver hdr */
  1039. static void
  1040. bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
  1041. {
  1042. u32 pgnum, pgoff, loff = 0;
  1043. int i;
  1044. pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
  1045. pgoff = PSS_SMEM_PGOFF(loff);
  1046. writel(pgnum, ioc->ioc_regs.host_page_num_fn);
  1047. for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
  1048. writel(0, ioc->ioc_regs.smem_page_start + loff);
  1049. loff += sizeof(u32);
  1050. }
  1051. }
/* Pre-acquire cleanup: if a previous (non-normal) firmware image is
 * still recorded, wipe it and force both functions back to UNINIT so
 * the next enable performs a clean boot. Serialized on the init sem.
 */
static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate, r32;

	/* Spin on init semaphore to serialize. */
	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* Nothing to clean up if the IOC was never initialized. */
	fwstate = readl(ioc->ioc_regs.ioc_fwstate);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	/* A normally-booted firmware image is left alone. */
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	/* Stale/non-normal image: wipe the header and reset fw state
	 * for both this and the alternate function.
	 */
	bfa_ioc_fwver_clear(ioc);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);

	/* Unlock init semaphore */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
}
/* Try to acquire the h/w semaphore; reports the outcome to the IOCPF
 * state machine as SEMLOCKED / SEM_ERROR, or retries via sem_timer.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);

	/* All-ones is not a valid semaphore value: report an error. */
	if (r32 == ~0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	/* Bit 0 clear: this read just claimed the semaphore. */
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	/* Busy: poll again after the semaphore timeout. */
	mod_timer(&ioc->sem_timer, jiffies +
		  msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}
void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	/* Writing 1 releases the h/w semaphore. */
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	/* Stop the retry timer armed by bfa_ioc_hw_sem_get(). */
	del_timer(&ioc->sem_timer);
}
/**
 * @brief
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	/* Take LMEM out of reset and enable its initialization. */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	/* Init complete: clear the enable/done control bits. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
  1148. static void
  1149. bfa_ioc_lpu_start(struct bfa_ioc *ioc)
  1150. {
  1151. u32 pss_ctl;
  1152. /**
  1153. * Take processor out of reset.
  1154. */
  1155. pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
  1156. pss_ctl &= ~__PSS_LPU0_RESET;
  1157. writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
  1158. }
  1159. static void
  1160. bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
  1161. {
  1162. u32 pss_ctl;
  1163. /**
  1164. * Put processors in reset.
  1165. */
  1166. pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
  1167. pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
  1168. writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
  1169. }
  1170. /**
  1171. * Get driver and firmware versions.
  1172. */
  1173. void
  1174. bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
  1175. {
  1176. u32 pgnum;
  1177. u32 loff = 0;
  1178. int i;
  1179. u32 *fwsig = (u32 *) fwhdr;
  1180. pgnum = bfa_ioc_smem_pgnum(ioc, loff);
  1181. writel(pgnum, ioc->ioc_regs.host_page_num_fn);
  1182. for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
  1183. i++) {
  1184. fwsig[i] =
  1185. swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
  1186. loff += sizeof(u32);
  1187. }
  1188. }
  1189. /**
  1190. * Returns TRUE if same.
  1191. */
  1192. bool
  1193. bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
  1194. {
  1195. struct bfi_ioc_image_hdr *drv_fwhdr;
  1196. int i;
  1197. drv_fwhdr = (struct bfi_ioc_image_hdr *)
  1198. bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
  1199. for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
  1200. if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
  1201. return false;
  1202. }
  1203. return true;
  1204. }
  1205. /**
  1206. * Return true if current running version is valid. Firmware signature and
  1207. * execution context (driver/bios) must match.
  1208. */
  1209. static bool
  1210. bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
  1211. {
  1212. struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
  1213. bfa_nw_ioc_fwver_get(ioc, &fwhdr);
  1214. drv_fwhdr = (struct bfi_ioc_image_hdr *)
  1215. bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
  1216. if (fwhdr.signature != drv_fwhdr->signature)
  1217. return false;
  1218. if (swab32(fwhdr.bootenv) != boot_env)
  1219. return false;
  1220. return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
  1221. }
  1222. /**
  1223. * Conditionally flush any pending message from firmware at start.
  1224. */
  1225. static void
  1226. bfa_ioc_msgflush(struct bfa_ioc *ioc)
  1227. {
  1228. u32 r32;
  1229. r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
  1230. if (r32)
  1231. writel(1, ioc->ioc_regs.lpu_mbox_cmd);
  1232. }
/**
 * @img ioc_init_logic.jpg
 *
 * Decide how to bring the IOC up from its current fw state: boot fresh
 * firmware, wait out an in-progress init, or simply re-enable.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/* A forced init discards the current fw state entirely. */
	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
	bfa_ioc_poll_fwinit(ioc);
}
  1283. void
  1284. bfa_nw_ioc_timeout(void *ioc_arg)
  1285. {
  1286. struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
  1287. bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
  1288. }
/* Copy a host-to-fw message into the mailbox registers, zero-pad the
 * remainder, and ring the doorbell.
 */
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	/* NOTE(review): writel() already performs a cpu-to-LE conversion,
	 * so the extra cpu_to_le32() byte-swaps the payload on big-endian
	 * hosts - confirm this matches what the firmware expects.
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/* Zero-fill the rest of the mailbox. */
	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
  1309. static void
  1310. bfa_ioc_send_enable(struct bfa_ioc *ioc)
  1311. {
  1312. struct bfi_ioc_ctrl_req enable_req;
  1313. struct timeval tv;
  1314. bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
  1315. bfa_ioc_portid(ioc));
  1316. enable_req.clscode = htons(ioc->clscode);
  1317. do_gettimeofday(&tv);
  1318. enable_req.tv_sec = ntohl(tv.tv_sec);
  1319. bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
  1320. }
  1321. static void
  1322. bfa_ioc_send_disable(struct bfa_ioc *ioc)
  1323. {
  1324. struct bfi_ioc_ctrl_req disable_req;
  1325. bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
  1326. bfa_ioc_portid(ioc));
  1327. bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
  1328. }
  1329. static void
  1330. bfa_ioc_send_getattr(struct bfa_ioc *ioc)
  1331. {
  1332. struct bfi_ioc_getattr_req attr_req;
  1333. bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
  1334. bfa_ioc_portid(ioc));
  1335. bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
  1336. bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
  1337. }
  1338. void
  1339. bfa_nw_ioc_hb_check(void *cbarg)
  1340. {
  1341. struct bfa_ioc *ioc = cbarg;
  1342. u32 hb_count;
  1343. hb_count = readl(ioc->ioc_regs.heartbeat);
  1344. if (ioc->hb_count == hb_count) {
  1345. bfa_ioc_recover(ioc);
  1346. return;
  1347. } else {
  1348. ioc->hb_count = hb_count;
  1349. }
  1350. bfa_ioc_mbox_poll(ioc);
  1351. mod_timer(&ioc->hb_timer, jiffies +
  1352. msecs_to_jiffies(BFA_IOC_HB_TOV));
  1353. }
static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	/* Take a baseline heartbeat count, then start periodic checks. */
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		  msecs_to_jiffies(BFA_IOC_HB_TOV));
}
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	/* Stop heartbeat monitoring. */
	del_timer(&ioc->hb_timer);
}
/**
 * @brief
 * Initiate a full firmware download.
 *
 * Copies the driver-embedded fw image word-by-word into adapter SMEM,
 * walking flash chunks and host page windows, then stamps the boot
 * type/env/device-mode words at their fixed offsets.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;

	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
		/* Fetch the next flash chunk when the index crosses one. */
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
		       ((ioc->ioc_regs.smem_page_start) + (loff)));

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
			       ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page window to the start of smem. */
	writel(bfa_ioc_smem_pgnum(ioc, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				      ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
			  + BFI_FWBOOT_DEVMODE_OFF));
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			   + (BFI_FWBOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			  + (BFI_FWBOOT_ENV_OFF)));
}
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	/* Thin wrapper: (re)run h/w init; force discards the current fw
	 * state so the firmware is always rebooted.
	 */
	bfa_ioc_hwinit(ioc, force);
}
  1424. /**
  1425. * BFA ioc enable reply by firmware
  1426. */
  1427. static void
  1428. bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
  1429. u8 cap_bm)
  1430. {
  1431. struct bfa_iocpf *iocpf = &ioc->iocpf;
  1432. ioc->port_mode = ioc->port_mode_cfg = port_mode;
  1433. ioc->ad_cap_bm = cap_bm;
  1434. bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
  1435. }
/**
 * @brief
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	/* Fix up endianness of the fw-filled attribute block in place. */
	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	/* No message-class handlers registered yet; default cbarg. */
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	bfa_mbox_cmd_cbfn_t cbfn;
	void *cbarg;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	/**
	 * Give a callback to the client, indicating that the command is sent
	 */
	if (cmd->cbfn) {
		cbfn = cmd->cbfn;
		cbarg = cmd->cbarg;
		/* Clear before invoking: the callback fires only once. */
		cmd->cbfn = NULL;
		cbfn(cbarg);
	}
}
/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	/* Dequeue (and drop) every queued command without sending it. */
	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
/**
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]  ioc     memory for IOC
 * @param[in]  tbuf    app memory to store data from smem
 * @param[in]  soff    smem offset
 * @param[in]  sz      size of smem in bytes
 *
 * Returns 0 on success, 1 if the init semaphore could not be acquired.
 */
static int
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff, r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
		return 1;

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	for (i = 0; i < len; i++) {
		/* NOTE(review): swab32() followed by be32_to_cpu() cancel
		 * out on little-endian hosts - confirm this double
		 * conversion is the intended byte ordering.
		 */
		r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page window to the start of smem. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return 0;
}
  1556. /**
  1557. * Retrieve saved firmware trace from a prior IOC failure.
  1558. */
  1559. int
  1560. bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
  1561. {
  1562. u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
  1563. int tlen, status = 0;
  1564. tlen = *trclen;
  1565. if (tlen > BNA_DBG_FWTRC_LEN)
  1566. tlen = BNA_DBG_FWTRC_LEN;
  1567. status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
  1568. *trclen = tlen;
  1569. return status;
  1570. }
  1571. /**
  1572. * Save firmware trace if configured.
  1573. */
  1574. static void
  1575. bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
  1576. {
  1577. int tlen;
  1578. if (ioc->dbg_fwsave_once) {
  1579. ioc->dbg_fwsave_once = 0;
  1580. if (ioc->dbg_fwsave_len) {
  1581. tlen = ioc->dbg_fwsave_len;
  1582. bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
  1583. }
  1584. }
  1585. }
  1586. /**
  1587. * Retrieve saved firmware trace from a prior IOC failure.
  1588. */
  1589. int
  1590. bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
  1591. {
  1592. int tlen;
  1593. if (ioc->dbg_fwsave_len == 0)
  1594. return BFA_STATUS_ENOFSAVE;
  1595. tlen = *trclen;
  1596. if (tlen > ioc->dbg_fwsave_len)
  1597. tlen = ioc->dbg_fwsave_len;
  1598. memcpy(trcdata, ioc->dbg_fwsave, tlen);
  1599. *trclen = tlen;
  1600. return BFA_STATUS_OK;
  1601. }
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
	/* Capture the fw trace while the failure context is still fresh. */
	bfa_nw_ioc_debug_save_ftrc(ioc);
}
/**
 * IOCPF to IOC interface
 */

/* IOCPF completed enable: forward to the IOC state machine. */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}

/* IOCPF completed disable: forward to the IOC state machine. */
static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}

/* IOCPF-level failure: forward to the IOC state machine. */
static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}

/* Unrecoverable hardware failure: forward to the IOC state machine. */
static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}

/* Firmware mismatch: complete the pending enable with an error. */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
/**
 * IOC public
 */

/* Initialize the chip PLL and local memory, under the init semaphore. */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);
	ioc->pllinit = true;

	/* Initialize LMEM */
	bfa_ioc_lmem_init(ioc);

	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static void
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
	     u32 boot_env)
{
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
	} else {
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
	}

	/* Drain stale mailbox messages before loading new firmware. */
	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	/* Release the LPU to start executing the downloaded image. */
	bfa_ioc_lpu_start(ioc);
}
/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	/* Writes a driver-global flag (definition not in this chunk). */
	bfa_nw_auto_recover = auto_recover;
}
  1696. static bool
  1697. bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
  1698. {
  1699. u32 *msgp = mbmsg;
  1700. u32 r32;
  1701. int i;
  1702. r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
  1703. if ((r32 & 1) == 0)
  1704. return false;
  1705. /**
  1706. * read the MBOX msg
  1707. */
  1708. for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
  1709. i++) {
  1710. r32 = readl(ioc->ioc_regs.lpu_mbox +
  1711. i * sizeof(u32));
  1712. msgp[i] = htonl(r32);
  1713. }
  1714. /**
  1715. * turn off mailbox interrupt by clearing mailbox status
  1716. */
  1717. writel(1, ioc->ioc_regs.lpu_mbox_cmd);
  1718. readl(ioc->ioc_regs.lpu_mbox_cmd);
  1719. return true;
  1720. }
  1721. static void
  1722. bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
  1723. {
  1724. union bfi_ioc_i2h_msg_u *msg;
  1725. struct bfa_iocpf *iocpf = &ioc->iocpf;
  1726. msg = (union bfi_ioc_i2h_msg_u *) m;
  1727. bfa_ioc_stats(ioc, ioc_isrs);
  1728. switch (msg->mh.msg_id) {
  1729. case BFI_IOC_I2H_HBEAT:
  1730. break;
  1731. case BFI_IOC_I2H_ENABLE_REPLY:
  1732. bfa_ioc_enable_reply(ioc,
  1733. (enum bfa_mode)msg->fw_event.port_mode,
  1734. msg->fw_event.cap_bm);
  1735. break;
  1736. case BFI_IOC_I2H_DISABLE_REPLY:
  1737. bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
  1738. break;
  1739. case BFI_IOC_I2H_GETATTR_REPLY:
  1740. bfa_ioc_getattr_reply(ioc);
  1741. break;
  1742. default:
  1743. BUG_ON(1);
  1744. }
  1745. }
/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->fcmode = false;
	ioc->pllinit = false;
	/* Arm the one-shot firmware trace capture for the first failure. */
	ioc->dbg_fwsave_once = true;
	ioc->iocpf.ioc = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	/* Start the state machine in uninit, then deliver a reset event. */
	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/**
 * Driver detach time IOC cleanup.
 */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);

	/* Done with detach, empty the notify_q. */
	INIT_LIST_HEAD(&ioc->notify_q);
}
/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		    enum bfi_pcifn_class clscode)
{
	ioc->clscode = clscode;
	ioc->pcidev = *pcidev;

	/**
	 * Initialize IOC and device personality
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case PCI_DEVICE_ID_BROCADE_CT:
		/* CT ASIC: Ethernet ports, converged (CNA) personality. */
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;
		break;

	case BFA_PCI_DEVICE_ID_CT2:
		ioc->asic_gen = BFI_ASIC_GEN_CT2;
		if (clscode == BFI_PCIFN_CLASS_FC &&
		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
			/* Native FC (16G) HBA personality. */
			ioc->asic_mode = BFI_ASIC_MODE_FC16;
			ioc->fcmode = true;
			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
			ioc->ad_cap_bm = BFA_CM_HBA;
		} else {
			/* Ethernet ASIC mode; SSID selects CNA vs NIC. */
			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
			ioc->asic_mode = BFI_ASIC_MODE_ETH;
			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
				ioc->port_mode =
					ioc->port_mode_cfg = BFA_MODE_CNA;
				ioc->ad_cap_bm = BFA_CM_CNA;
			} else {
				ioc->port_mode =
					ioc->port_mode_cfg = BFA_MODE_NIC;
				ioc->ad_cap_bm = BFA_CM_NIC;
			}
		}
		break;

	default:
		/* Unknown PCI device id — cannot continue. */
		BUG_ON(1);
	}

	/**
	 * Set asic specific interfaces.
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CT)
		bfa_nw_ioc_set_ct_hwif(ioc);
	else {
		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
		bfa_nw_ioc_set_ct2_hwif(ioc);
		bfa_nw_ioc_ct2_poweron(ioc);
	}

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}
/**
 * Return size of dma memory required.
 */
u32
bfa_nw_ioc_meminfo(void)
{
	/* Firmware attribute block, padded to the DMA alignment. */
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}
/* Request IOC enable via the IOC state machine. */
void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	/* Re-arm the one-shot firmware trace capture for this run. */
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
/* Request IOC disable via the IOC state machine. */
void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/**
 * Initialize memory for saving firmware trace.
 */
void
bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	/* Trace saving is only enabled when auto-recovery is on. */
	ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
}
/* Compute the PSS shared-memory page number for offset @fmaddr. */
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
/**
 * Register mailbox message handler function, to be called by common modules
 *
 * @param[in]	mc	message class the handler receives
 * @param[in]	cbfn	handler function
 * @param[in]	cbarg	opaque argument passed back to @cbfn
 */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		       bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
  1900. /**
  1901. * Queue a mailbox command request to firmware. Waits if mailbox is busy.
  1902. * Responsibility of caller to serialize
  1903. *
  1904. * @param[in] ioc IOC instance
  1905. * @param[i] cmd Mailbox command
  1906. */
  1907. bool
  1908. bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
  1909. bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
  1910. {
  1911. struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
  1912. u32 stat;
  1913. cmd->cbfn = cbfn;
  1914. cmd->cbarg = cbarg;
  1915. /**
  1916. * If a previous command is pending, queue new command
  1917. */
  1918. if (!list_empty(&mod->cmd_q)) {
  1919. list_add_tail(&cmd->qe, &mod->cmd_q);
  1920. return true;
  1921. }
  1922. /**
  1923. * If mailbox is busy, queue command for poll timer
  1924. */
  1925. stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
  1926. if (stat) {
  1927. list_add_tail(&cmd->qe, &mod->cmd_q);
  1928. return true;
  1929. }
  1930. /**
  1931. * mailbox is free -- queue command to firmware
  1932. */
  1933. bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
  1934. return false;
  1935. }
  1936. /**
  1937. * Handle mailbox interrupts
  1938. */
  1939. void
  1940. bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
  1941. {
  1942. struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
  1943. struct bfi_mbmsg m;
  1944. int mc;
  1945. if (bfa_ioc_msgget(ioc, &m)) {
  1946. /**
  1947. * Treat IOC message class as special.
  1948. */
  1949. mc = m.mh.msg_class;
  1950. if (mc == BFI_MC_IOC) {
  1951. bfa_ioc_isr(ioc, &m);
  1952. return;
  1953. }
  1954. if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
  1955. return;
  1956. mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
  1957. }
  1958. bfa_ioc_lpu_read_stat(ioc);
  1959. /**
  1960. * Try to send pending mailbox commands
  1961. */
  1962. bfa_ioc_mbox_poll(ioc);
  1963. }
/* Hardware error interrupt: count it and drive the IOC into failure. */
void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/**
 * return true if IOC is disabled
 */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
	/* "Disabling" (in progress) is reported as disabled as well. */
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
/**
 * return true if IOC is operational
 */
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
			   struct bfa_ioc_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->notify_q);
}
  1998. #define BFA_MFG_NAME "Brocade"
  1999. static void
  2000. bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
  2001. struct bfa_adapter_attr *ad_attr)
  2002. {
  2003. struct bfi_ioc_attr *ioc_attr;
  2004. ioc_attr = ioc->attr;
  2005. bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
  2006. bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
  2007. bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
  2008. bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
  2009. memcpy(&ad_attr->vpd, &ioc_attr->vpd,
  2010. sizeof(struct bfa_mfg_vpd));
  2011. ad_attr->nports = bfa_ioc_get_nports(ioc);
  2012. ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
  2013. bfa_ioc_get_adapter_model(ioc, ad_attr->model);
  2014. /* For now, model descr uses same model string */
  2015. bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
  2016. ad_attr->card_type = ioc_attr->card_type;
  2017. ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
  2018. if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
  2019. ad_attr->prototype = 1;
  2020. else
  2021. ad_attr->prototype = 0;
  2022. ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
  2023. ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
  2024. ad_attr->pcie_gen = ioc_attr->pcie_gen;
  2025. ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
  2026. ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
  2027. ad_attr->asic_rev = ioc_attr->asic_rev;
  2028. bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
  2029. }
  2030. static enum bfa_ioc_type
  2031. bfa_ioc_get_type(struct bfa_ioc *ioc)
  2032. {
  2033. if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
  2034. return BFA_IOC_TYPE_LL;
  2035. BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
  2036. return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
  2037. ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
  2038. }
/* Copy the adapter serial number (fixed-length field) into @serial_num. */
static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	/* NOTE(review): the memcpy below covers the full length, so this
	 * memset is redundant.
	 */
	memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy(serial_num,
	       (void *)ioc->attr->brcd_serialnum,
	       BFA_ADAPTER_SERIAL_NUM_LEN);
}
/* Copy the firmware version string (fixed-length field) into @fw_ver. */
static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memset(fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
  2053. static void
  2054. bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
  2055. {
  2056. BUG_ON(!(chip_rev));
  2057. memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
  2058. chip_rev[0] = 'R';
  2059. chip_rev[1] = 'e';
  2060. chip_rev[2] = 'v';
  2061. chip_rev[3] = '-';
  2062. chip_rev[4] = ioc->attr->asic_rev;
  2063. chip_rev[5] = '\0';
  2064. }
/* Copy the option-ROM version string (fixed-length) into @optrom_ver. */
static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memset(optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
	       BFA_VERSION_LEN);
}
  2072. static void
  2073. bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
  2074. {
  2075. memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
  2076. memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
  2077. }
/* Build the adapter model name, "<mfg>-<card_type>", into @model. */
static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!(model));
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		 BFA_MFG_NAME, ioc_attr->card_type);
}
  2088. static enum bfa_ioc_state
  2089. bfa_ioc_get_state(struct bfa_ioc *ioc)
  2090. {
  2091. enum bfa_iocpf_state iocpf_st;
  2092. enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
  2093. if (ioc_st == BFA_IOC_ENABLING ||
  2094. ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
  2095. iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
  2096. switch (iocpf_st) {
  2097. case BFA_IOCPF_SEMWAIT:
  2098. ioc_st = BFA_IOC_SEMWAIT;
  2099. break;
  2100. case BFA_IOCPF_HWINIT:
  2101. ioc_st = BFA_IOC_HWINIT;
  2102. break;
  2103. case BFA_IOCPF_FWMISMATCH:
  2104. ioc_st = BFA_IOC_FWMISMATCH;
  2105. break;
  2106. case BFA_IOCPF_FAIL:
  2107. ioc_st = BFA_IOC_FAIL;
  2108. break;
  2109. case BFA_IOCPF_INITFAIL:
  2110. ioc_st = BFA_IOC_INITFAIL;
  2111. break;
  2112. default:
  2113. break;
  2114. }
  2115. }
  2116. return ioc_st;
  2117. }
  2118. void
  2119. bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
  2120. {
  2121. memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
  2122. ioc_attr->state = bfa_ioc_get_state(ioc);
  2123. ioc_attr->port_id = ioc->port_id;
  2124. ioc_attr->port_mode = ioc->port_mode;
  2125. ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
  2126. ioc_attr->cap_bm = ioc->ad_cap_bm;
  2127. ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
  2128. bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
  2129. ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
  2130. ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
  2131. bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
  2132. }
/**
 * WWN public
 */

/* Return the port WWN from the firmware attribute block. */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}
/* Return the MAC address from the firmware attribute block. */
mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}
/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heart Beat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
/**
 * @dg hal_iocpf_pvt BFA IOC PF private functions
 * @{
 */

/* Forward an enable request from the IOC to the IOCPF state machine. */
static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
/* Forward a disable request from the IOC to the IOCPF state machine. */
static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}
/* Forward a failure notification to the IOCPF state machine. */
static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}
/* Forward an initialization-failure event to the IOCPF state machine. */
static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}
/* Forward a get-attributes failure to the IOCPF state machine. */
static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}
/* Forward a stop request to the IOCPF state machine. */
static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}
  2191. void
  2192. bfa_nw_iocpf_timeout(void *ioc_arg)
  2193. {
  2194. struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
  2195. enum bfa_iocpf_state iocpf_st;
  2196. iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
  2197. if (iocpf_st == BFA_IOCPF_HWINIT)
  2198. bfa_ioc_poll_fwinit(ioc);
  2199. else
  2200. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
  2201. }
/* Semaphore timer expiry: retry acquiring the hardware semaphore. */
void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
/*
 * Poll the firmware state register during init. BFI_IOC_DISABLED is the
 * ready indication here; otherwise keep polling until BFA_IOC_TOV expires.
 */
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		/* Polling budget exhausted — treat as an IOCPF timeout. */
		bfa_nw_iocpf_timeout(ioc);
	} else {
		/* Not ready yet: account the interval and re-arm the timer. */
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			  msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}
  2224. /*
  2225. * Flash module specific
  2226. */
  2227. /*
  2228. * FLASH DMA buffer should be big enough to hold both MFG block and
  2229. * asic block(64k) at the same time and also should be 2k aligned to
  2230. * avoid write segement to cross sector boundary.
  2231. */
  2232. #define BFA_FLASH_SEG_SZ 2048
  2233. #define BFA_FLASH_DMA_BUF_SZ \
  2234. roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
/* Complete the current flash operation and invoke the user callback. */
static void
bfa_flash_cb(struct bfa_flash *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}
  2242. static void
  2243. bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
  2244. {
  2245. struct bfa_flash *flash = cbarg;
  2246. switch (event) {
  2247. case BFA_IOC_E_DISABLED:
  2248. case BFA_IOC_E_FAILED:
  2249. if (flash->op_busy) {
  2250. flash->status = BFA_STATUS_IOC_FAILURE;
  2251. flash->cbfn(flash->cbarg, flash->status);
  2252. flash->op_busy = 0;
  2253. }
  2254. break;
  2255. default:
  2256. break;
  2257. }
  2258. }
  2259. /*
  2260. * Send flash write request.
  2261. *
  2262. * @param[in] cbarg - callback argument
  2263. */
  2264. static void
  2265. bfa_flash_write_send(struct bfa_flash *flash)
  2266. {
  2267. struct bfi_flash_write_req *msg =
  2268. (struct bfi_flash_write_req *) flash->mb.msg;
  2269. u32 len;
  2270. msg->type = be32_to_cpu(flash->type);
  2271. msg->instance = flash->instance;
  2272. msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
  2273. len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
  2274. flash->residue : BFA_FLASH_DMA_BUF_SZ;
  2275. msg->length = be32_to_cpu(len);
  2276. /* indicate if it's the last msg of the whole write operation */
  2277. msg->last = (len == flash->residue) ? 1 : 0;
  2278. bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
  2279. bfa_ioc_portid(flash->ioc));
  2280. bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
  2281. memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
  2282. bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
  2283. flash->residue -= len;
  2284. flash->offset += len;
  2285. }
  2286. /*
  2287. * Send flash read request.
  2288. *
  2289. * @param[in] cbarg - callback argument
  2290. */
  2291. static void
  2292. bfa_flash_read_send(void *cbarg)
  2293. {
  2294. struct bfa_flash *flash = cbarg;
  2295. struct bfi_flash_read_req *msg =
  2296. (struct bfi_flash_read_req *) flash->mb.msg;
  2297. u32 len;
  2298. msg->type = be32_to_cpu(flash->type);
  2299. msg->instance = flash->instance;
  2300. msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
  2301. len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
  2302. flash->residue : BFA_FLASH_DMA_BUF_SZ;
  2303. msg->length = be32_to_cpu(len);
  2304. bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
  2305. bfa_ioc_portid(flash->ioc));
  2306. bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
  2307. bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
  2308. }
/*
 * Process flash response messages upon receiving interrupts.
 *
 * @param[in] flasharg - flash structure
 * @param[in] msg - message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
{
	struct bfa_flash *flash = flasharg;
	u32 status;

	/* View the generic mailbox message as a specific response type. */
	union {
		struct bfi_flash_query_rsp *query;
		struct bfi_flash_write_rsp *write;
		struct bfi_flash_read_rsp *read;
		struct bfi_mbmsg *msg;
	} m;

	m.msg = msg;

	/* receiving response after ioc failure */
	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
		return;

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		if (status == BFA_STATUS_OK) {
			u32 i;
			struct bfa_flash_attr *attr, *f;

			/* Byte-swap the attribute table from the DMA
			 * buffer into the caller's buffer.
			 */
			attr = (struct bfa_flash_attr *) flash->ubuf;
			f = (struct bfa_flash_attr *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			/* Error, or the final chunk was acknowledged. */
			flash->status = status;
			bfa_flash_cb(flash);
		} else
			/* More data pending: send the next write chunk. */
			bfa_flash_write_send(flash);
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			/* Copy this chunk out of the DMA buffer and keep
			 * reading until the residue reaches zero.
			 */
			u32 len = be32_to_cpu(m.read->length);

			memcpy(flash->ubuf + flash->offset,
			       flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else
				bfa_flash_read_send(flash);
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
	case BFI_FLASH_I2H_EVENT:
		/* Informational; nothing to do. */
		break;
	default:
		WARN_ON(1);
	}
}
/*
 * Flash memory info API.
 *
 * Returns the DMA buffer size the flash module requires, aligned.
 */
u32
bfa_nw_flash_meminfo(void)
{
	return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * Flash attach API.
 *
 * @param[in] flash - flash structure
 * @param[in] ioc - ioc structure
 * @param[in] dev - device structure
 */
void
bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
{
	flash->ioc = ioc;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;

	/* Receive BFI_MC_FLASH mailbox messages. */
	bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_q_qe_init(&flash->ioc_notify);
	/* Get notified of IOC disable/failure to abort in-flight ops. */
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}
  2418. /*
  2419. * Claim memory for flash
  2420. *
  2421. * @param[in] flash - flash structure
  2422. * @param[in] dm_kva - pointer to virtual memory address
  2423. * @param[in] dm_pa - physical memory address
  2424. */
  2425. void
  2426. bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
  2427. {
  2428. flash->dbuf_kva = dm_kva;
  2429. flash->dbuf_pa = dm_pa;
  2430. memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
  2431. dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
  2432. dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
  2433. }
/*
 * Get flash attribute.
 *
 * @param[in] flash - flash structure
 * @param[in] attr - flash attribute structure
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
		      bfa_cb_flash cbfn, void *cbarg)
{
	struct bfi_flash_query_req *msg =
		(struct bfi_flash_query_req *) flash->mb.msg;

	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/* Only one flash operation may be outstanding at a time. */
	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;

	/* Response data lands in the DMA buffer; see bfa_flash_intr(). */
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	return BFA_STATUS_OK;
}
/*
 * Update flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - update data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
			 void *buf, u32 len, u32 offset,
			 bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	/* The manufacturing partition is write-protected here. */
	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;

	/* Only one flash operation may be outstanding at a time. */
	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	/* residue/offset are the chunking cursors for the transfer. */
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	/* Kick off the first chunk; bfa_flash_intr() sends the rest. */
	bfa_flash_write_send(flash);

	return BFA_STATUS_OK;
}
/*
 * Read flash partition.
 *
 * @param[in] flash - flash structure
 * @param[in] type - flash partition type
 * @param[in] instance - flash partition instance
 * @param[in] buf - read data buffer
 * @param[in] len - data buffer length
 * @param[in] offset - offset relative to the partition starting address
 * @param[in] cbfn - callback function
 * @param[in] cbarg - callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
		       void *buf, u32 len, u32 offset,
		       bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be in word (4-byte) boundary
	 */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	/* Only one flash operation may be outstanding at a time. */
	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	/* residue/offset are the chunking cursors for the transfer. */
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	/* Kick off the first chunk; bfa_flash_intr() requests the rest. */
	bfa_flash_read_send(flash);

	return BFA_STATUS_OK;
}