  1. /*
  2. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include "bfa_ioc.h"
  18. #include "bfi_ctreg.h"
  19. #include "bfa_defs.h"
  20. #include "bfa_defs_svc.h"
  21. #include "bfad_drv.h"
  22. BFA_TRC_FILE(CNA, IOC);
/**
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	2
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

/* Arm/disarm the IOC state-machine timer (fires bfa_ioc_timeout). */
#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

/* Arm/disarm the heartbeat monitor timer (fires bfa_ioc_hb_check). */
#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

/*
 * Firmware trace buffer sizing: BFA_DBG_FWTRC_LEN is the per-function
 * trace region (entries plus the bfa_trc_mod_s header minus its builtin
 * BFA_TRC_MAX entry array); BFA_DBG_FWTRC_OFF locates function _fn's region.
 */
#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	 (sizeof(struct bfa_trc_mod_s) -			\
	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 * These dispatch through the per-ASIC hardware interface vector.
 */
#define bfa_ioc_firmware_lock(__ioc)			\
	((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
	((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)			\
	((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))

#ifdef BFA_IOC_IS_UEFI
/* UEFI build: never treat firmware as BIOS option ROM. */
#define bfa_ioc_is_bios_optrom(__ioc) (0)
#define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI
#else
/*
 * Non-UEFI build: a firmware image smaller than BFA_IOC_FWIMG_MINSZ is
 * taken to mean the adapter is running BIOS option-ROM firmware.
 */
#define bfa_ioc_is_bios_optrom(__ioc)	\
	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
#define bfa_ioc_is_uefi(__ioc) (0)
#endif

/* True when commands are queued or a mailbox command is still in flight. */
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
	 bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd))
/* Module-wide knob: auto-recover the IOC after heartbeat failure. */
bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
/* Notifications from the IOCPF state machine up to the IOC state machine. */
static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/**
 * hal_ioc_sm
 */

/**
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/*  IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*  IOC disable request		*/
	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*  f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
	IOC_E_FAILED		= 8,	/*  failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

/* Maps each IOC state handler to its externally visible bfa_ioc_state. */
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/**
 * IOCPF state machine definitions/declarations
 */

/*
 * IOCPF timers share ioc_timer with the IOC sm (the two state machines
 * never run timers concurrently); the semaphore wait uses its own timer.
 */
#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

/* Shorter timeout used while retrying initialization after a failure. */
#define bfa_iocpf_recovery_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc_s *ioc);
static void bfa_iocpf_disable(struct bfa_ioc_s *ioc);
static void bfa_iocpf_fail(struct bfa_ioc_s *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc);
static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);

/**
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*  init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
};
/**
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized	*/
	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed		*/
	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed		*/
	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

/*
 * Maps each IOCPF state handler to its reported state. Note fwcheck and
 * mismatch both report FWMISMATCH, and enabling reports HWINIT.
 */
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/**
 * IOC State Machine
 */

/**
 * Beginning state. IOC uninit state. No entry actions.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}
  219. /**
  220. * IOC is in uninit state.
  221. */
  222. static void
  223. bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
  224. {
  225. bfa_trc(ioc, event);
  226. switch (event) {
  227. case IOC_E_RESET:
  228. bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
  229. break;
  230. default:
  231. bfa_sm_fault(ioc, event);
  232. }
  233. }
/**
 * Reset entry actions -- initialize state machine.
 * Also resets the companion IOCPF state machine.
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
  242. /**
  243. * IOC is in reset state.
  244. */
  245. static void
  246. bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
  247. {
  248. bfa_trc(ioc, event);
  249. switch (event) {
  250. case IOC_E_ENABLE:
  251. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  252. break;
  253. case IOC_E_DISABLE:
  254. bfa_ioc_disable_comp(ioc);
  255. break;
  256. case IOC_E_DETACH:
  257. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  258. break;
  259. default:
  260. bfa_sm_fault(ioc, event);
  261. }
  262. }
/* Enabling entry: kick off the IOCPF enable sequence. */
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_iocpf_enable(ioc);
}
  268. /**
  269. * Host IOC function is being enabled, awaiting response from firmware.
  270. * Semaphore is acquired.
  271. */
  272. static void
  273. bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
  274. {
  275. bfa_trc(ioc, event);
  276. switch (event) {
  277. case IOC_E_ENABLED:
  278. bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
  279. break;
  280. case IOC_E_FAILED:
  281. bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
  282. break;
  283. case IOC_E_HWERROR:
  284. bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
  285. bfa_iocpf_initfail(ioc);
  286. break;
  287. case IOC_E_DISABLE:
  288. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  289. break;
  290. case IOC_E_DETACH:
  291. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  292. bfa_iocpf_stop(ioc);
  293. break;
  294. case IOC_E_ENABLE:
  295. break;
  296. default:
  297. bfa_sm_fault(ioc, event);
  298. }
  299. }
/*
 * Getattr entry: arm the response timer before sending the getattr
 * mailbox command so a lost response still produces IOC_E_TIMEOUT.
 */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}
/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		/* Attributes received: validate WWNs and go operational. */
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;
	case IOC_E_FAILED:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */
	case IOC_E_TIMEOUT:
		/* Hardware error or no response: treat as init failure. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		bfa_iocpf_getattrfail(ioc);
		break;
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;
	case IOC_E_ENABLE:
		/* already being enabled: ignore */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Operational entry: report enable success to the driver, start the
 * heartbeat monitor and log the transition.
 */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n");
}
/*
 * IOC is operational. Heartbeat monitor is running; it must be stopped
 * on every exit path (disable, failure, hardware error).
 */
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);
	switch (event) {
	case IOC_E_ENABLE:
		/* already enabled: ignore */
		break;
	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;
	case IOC_E_FAILED:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		break;
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		/* HB failure already implies the monitor has fired. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		bfa_iocpf_fail(ioc);
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Disabling entry: start the IOCPF disable sequence and log.
 * NOTE(review): the "IOC disabled" message is logged when disabling
 * starts, not when it completes -- confirm this is intended.
 */
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_iocpf_disable(ioc);
	BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
}
  381. /**
  382. * IOC is being disabled
  383. */
  384. static void
  385. bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
  386. {
  387. bfa_trc(ioc, event);
  388. switch (event) {
  389. case IOC_E_DISABLED:
  390. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
  391. break;
  392. case IOC_E_HWERROR:
  393. /*
  394. * No state change. Will move to disabled state
  395. * after iocpf sm completes failure processing and
  396. * moves to disabled state.
  397. */
  398. bfa_iocpf_fail(ioc);
  399. break;
  400. default:
  401. bfa_sm_fault(ioc, event);
  402. }
  403. }
/**
 * IOC disable completion entry: deliver the disable completion callback.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}
  412. static void
  413. bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
  414. {
  415. bfa_trc(ioc, event);
  416. switch (event) {
  417. case IOC_E_ENABLE:
  418. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  419. break;
  420. case IOC_E_DISABLE:
  421. ioc->cbfn->disable_cbfn(ioc->bfa);
  422. break;
  423. case IOC_E_DETACH:
  424. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  425. bfa_iocpf_stop(ioc);
  426. break;
  427. default:
  428. bfa_sm_fault(ioc, event);
  429. }
  430. }
/* Initfail entry: report the failure through the enable callback. */
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
  436. /**
  437. * Hardware initialization failed.
  438. */
  439. static void
  440. bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
  441. {
  442. bfa_trc(ioc, event);
  443. switch (event) {
  444. case IOC_E_ENABLED:
  445. bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
  446. break;
  447. case IOC_E_FAILED:
  448. /**
  449. * Initialization failure during iocpf init retry.
  450. */
  451. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  452. break;
  453. case IOC_E_DISABLE:
  454. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  455. break;
  456. case IOC_E_DETACH:
  457. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  458. bfa_iocpf_stop(ioc);
  459. break;
  460. default:
  461. bfa_sm_fault(ioc, event);
  462. }
  463. }
  464. static void
  465. bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
  466. {
  467. struct list_head *qe;
  468. struct bfa_ioc_hbfail_notify_s *notify;
  469. struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
  470. /**
  471. * Notify driver and common modules registered for notification.
  472. */
  473. ioc->cbfn->hbfail_cbfn(ioc->bfa);
  474. list_for_each(qe, &ioc->hb_notify_q) {
  475. notify = (struct bfa_ioc_hbfail_notify_s *) qe;
  476. notify->cbfn(notify->cbarg);
  477. }
  478. BFA_LOG(KERN_CRIT, bfad, log_level,
  479. "Heart Beat of IOC has failed\n");
  480. }
  481. /**
  482. * IOC failure.
  483. */
  484. static void
  485. bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
  486. {
  487. bfa_trc(ioc, event);
  488. switch (event) {
  489. case IOC_E_FAILED:
  490. /**
  491. * Initialization failure during iocpf recovery.
  492. * !!! Fall through !!!
  493. */
  494. case IOC_E_ENABLE:
  495. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  496. break;
  497. case IOC_E_ENABLED:
  498. bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
  499. break;
  500. case IOC_E_DISABLE:
  501. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  502. break;
  503. case IOC_E_HWERROR:
  504. /*
  505. * HB failure notification, ignore.
  506. */
  507. break;
  508. default:
  509. bfa_sm_fault(ioc, event);
  510. }
  511. }
/**
 * IOCPF State Machine
 */

/**
 * Reset entry actions -- initialize state machine.
 * Snapshots the module-wide auto-recover setting for this instance.
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->retry_count = 0;
	iocpf->auto_recover = bfa_auto_recover;
}
  524. /**
  525. * Beginning state. IOC is in reset state.
  526. */
  527. static void
  528. bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  529. {
  530. struct bfa_ioc_s *ioc = iocpf->ioc;
  531. bfa_trc(ioc, event);
  532. switch (event) {
  533. case IOCPF_E_ENABLE:
  534. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
  535. break;
  536. case IOCPF_E_STOP:
  537. break;
  538. default:
  539. bfa_sm_fault(ioc, event);
  540. }
  541. }
/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;
	bfa_trc(ioc, event);
	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			/* Firmware version matches: proceed holding the sem. */
			iocpf->retry_count = 0;
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			/* Mismatch: release the semaphore before waiting. */
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;
	case IOCPF_E_DISABLE:
		/* Cancel the pending semaphore request before leaving. */
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;
	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state;
	 * retry_count gates repeat notifications on re-entry.
	 */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);
	iocpf->retry_count++;
	/* Periodically re-check for a matching firmware version. */
	bfa_iocpf_timer_start(iocpf->ioc);
}
  595. /**
  596. * Awaiting firmware version match.
  597. */
  598. static void
  599. bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  600. {
  601. struct bfa_ioc_s *ioc = iocpf->ioc;
  602. bfa_trc(ioc, event);
  603. switch (event) {
  604. case IOCPF_E_TIMEOUT:
  605. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
  606. break;
  607. case IOCPF_E_DISABLE:
  608. bfa_iocpf_timer_stop(ioc);
  609. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  610. bfa_ioc_pf_disabled(ioc);
  611. break;
  612. case IOCPF_E_STOP:
  613. bfa_iocpf_timer_stop(ioc);
  614. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  615. break;
  616. default:
  617. bfa_sm_fault(ioc, event);
  618. }
  619. }
  620. /**
  621. * Request for semaphore.
  622. */
  623. static void
  624. bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
  625. {
  626. bfa_ioc_hw_sem_get(iocpf->ioc);
  627. }
  628. /**
  629. * Awaiting semaphore for h/w initialzation.
  630. */
  631. static void
  632. bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  633. {
  634. struct bfa_ioc_s *ioc = iocpf->ioc;
  635. bfa_trc(ioc, event);
  636. switch (event) {
  637. case IOCPF_E_SEMLOCKED:
  638. iocpf->retry_count = 0;
  639. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
  640. break;
  641. case IOCPF_E_DISABLE:
  642. bfa_ioc_hw_sem_get_cancel(ioc);
  643. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
  644. break;
  645. default:
  646. bfa_sm_fault(ioc, event);
  647. }
  648. }
  649. static void
  650. bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
  651. {
  652. bfa_iocpf_timer_start(iocpf->ioc);
  653. bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
  654. }
/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		/* Firmware came up: proceed to the enable handshake. */
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		/* Retry hardware init up to BFA_IOC_HWINIT_MAX attempts. */
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_iocpf_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		/* Retries exhausted: drop the semaphore and give up. */
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		/* Only the timeout path reports failure up to the IOC. */
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  695. static void
  696. bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
  697. {
  698. bfa_iocpf_timer_start(iocpf->ioc);
  699. bfa_ioc_send_enable(iocpf->ioc);
  700. }
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		/* Enable acknowledged: release semaphore, go ready. */
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		/* Retry from full h/w init, up to BFA_IOC_HWINIT_MAX. */
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			/* Force firmware state back to UNINIT first. */
			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
				      BFI_IOC_UNINIT);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		/* Only the timeout path reports failure up to the IOC. */
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_FWREADY:
		/* Firmware restarted underneath us: re-send the enable. */
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  746. static void
  747. bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
  748. {
  749. bfa_ioc_pf_enabled(iocpf->ioc);
  750. }
/* IOC PF is operational; dispatch failure/disable events. */
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		break;

	case IOCPF_E_FWREADY:
		/*
		 * Unexpected firmware-ready while already up: pick the
		 * failure state based on whether the IOC was operational,
		 * then report the failure to the IOC.
		 */
		if (bfa_ioc_is_operational(ioc))
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		else
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		bfa_ioc_pf_failed(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  777. static void
  778. bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
  779. {
  780. bfa_iocpf_timer_start(iocpf->ioc);
  781. bfa_ioc_send_disable(iocpf->ioc);
  782. }
/**
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		/* No disable ack: force firmware state to FAIL. */
		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		/* Ignore an enable response arriving during disable. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
  812. /**
  813. * IOC disable completion entry.
  814. */
  815. static void
  816. bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
  817. {
  818. bfa_ioc_pf_disabled(iocpf->ioc);
  819. }
  820. static void
  821. bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  822. {
  823. struct bfa_ioc_s *ioc = iocpf->ioc;
  824. bfa_trc(ioc, event);
  825. switch (event) {
  826. case IOCPF_E_ENABLE:
  827. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
  828. break;
  829. case IOCPF_E_STOP:
  830. bfa_ioc_firmware_unlock(ioc);
  831. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  832. break;
  833. default:
  834. bfa_sm_fault(ioc, event);
  835. }
  836. }
  837. static void
  838. bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
  839. {
  840. bfa_iocpf_timer_start(iocpf->ioc);
  841. }
  842. /**
  843. * Hardware initialization failed.
  844. */
  845. static void
  846. bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  847. {
  848. struct bfa_ioc_s *ioc = iocpf->ioc;
  849. bfa_trc(ioc, event);
  850. switch (event) {
  851. case IOCPF_E_DISABLE:
  852. bfa_iocpf_timer_stop(ioc);
  853. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
  854. break;
  855. case IOCPF_E_STOP:
  856. bfa_iocpf_timer_stop(ioc);
  857. bfa_ioc_firmware_unlock(ioc);
  858. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  859. break;
  860. case IOCPF_E_TIMEOUT:
  861. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
  862. break;
  863. default:
  864. bfa_sm_fault(ioc, event);
  865. }
  866. }
/* Entry action for the failed state: bring the hardware down in order. */
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);
	bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);

	/**
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(iocpf->ioc);

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	/* Re-arm for an automatic recovery attempt, if configured. */
	if (iocpf->auto_recover)
		bfa_iocpf_recovery_timer_start(iocpf->ioc);
}
  886. /**
  887. * IOC is in failed state.
  888. */
  889. static void
  890. bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  891. {
  892. struct bfa_ioc_s *ioc = iocpf->ioc;
  893. bfa_trc(ioc, event);
  894. switch (event) {
  895. case IOCPF_E_DISABLE:
  896. if (iocpf->auto_recover)
  897. bfa_iocpf_timer_stop(ioc);
  898. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
  899. break;
  900. case IOCPF_E_TIMEOUT:
  901. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
  902. break;
  903. default:
  904. bfa_sm_fault(ioc, event);
  905. }
  906. }
/**
 * hal_ioc_pvt BFA IOC private functions
 */

/*
 * Disable completion: run the driver's disable callback first, then
 * every registered heartbeat-failure notification callback, in queue
 * order.
 */
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head			*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}
}
  924. bfa_boolean_t
  925. bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
  926. {
  927. u32 r32;
  928. int cnt = 0;
  929. #define BFA_SEM_SPINCNT 3000
  930. r32 = bfa_reg_read(sem_reg);
  931. while (r32 && (cnt < BFA_SEM_SPINCNT)) {
  932. cnt++;
  933. bfa_os_udelay(2);
  934. r32 = bfa_reg_read(sem_reg);
  935. }
  936. if (r32 == 0)
  937. return BFA_TRUE;
  938. bfa_assert(cnt < BFA_SEM_SPINCNT);
  939. return BFA_FALSE;
  940. }
/* Release a chip semaphore by writing 1 to its register. */
void
bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
{
	bfa_reg_write(sem_reg, 1);
}
/*
 * Try to acquire the IOC h/w semaphore. On success post
 * IOCPF_E_SEMLOCKED to the IOCPF state machine; otherwise start the
 * semaphore timer to retry later.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}
/* Release the IOC h/w semaphore (write 1 releases). */
void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}
/* Cancel a pending h/w semaphore acquisition (stops the retry timer). */
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	bfa_sem_timer_stop(ioc);
}
/**
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	/* Clear the init enable/done bits once initialization finished. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}
/* Start the LPU by clearing its reset bit in the PSS control register. */
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}
/* Stop both LPUs by setting their reset bits in the PSS control register. */
static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}
  1027. /**
  1028. * Get driver and firmware versions.
  1029. */
  1030. void
  1031. bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
  1032. {
  1033. u32 pgnum, pgoff;
  1034. u32 loff = 0;
  1035. int i;
  1036. u32 *fwsig = (u32 *) fwhdr;
  1037. pgnum = bfa_ioc_smem_pgnum(ioc, loff);
  1038. pgoff = bfa_ioc_smem_pgoff(ioc, loff);
  1039. bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
  1040. for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
  1041. i++) {
  1042. fwsig[i] =
  1043. bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
  1044. loff += sizeof(u32);
  1045. }
  1046. }
  1047. /**
  1048. * Returns TRUE if same.
  1049. */
  1050. bfa_boolean_t
  1051. bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
  1052. {
  1053. struct bfi_ioc_image_hdr_s *drv_fwhdr;
  1054. int i;
  1055. drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
  1056. bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
  1057. for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
  1058. if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
  1059. bfa_trc(ioc, i);
  1060. bfa_trc(ioc, fwhdr->md5sum[i]);
  1061. bfa_trc(ioc, drv_fwhdr->md5sum[i]);
  1062. return BFA_FALSE;
  1063. }
  1064. }
  1065. bfa_trc(ioc, fwhdr->md5sum[0]);
  1066. return BFA_TRUE;
  1067. }
/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_bios_optrom(ioc))
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	/* Signature must match the driver's bundled image. */
	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	/* Boot environment recorded in the header must match too. */
	if (bfa_os_swap32(fwhdr.param) != boot_env) {
		bfa_trc(ioc, fwhdr.param);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	/* Finally compare the MD5 signatures. */
	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/* A nonzero mbox-cmd status means a message is pending; ack it. */
	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
}
/*
 * Decide how to bring the IOC up based on the current firmware state:
 * boot fresh firmware, wait for an init started by the other function,
 * or simply re-enable a valid already-running image.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t	fwvalid;
	u32	boot_type;
	u32	boot_env;

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	/* A forced init ignores whatever state firmware reports. */
	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_BOOT_TYPE_NORMAL;
	boot_env = BFI_BOOT_LOADER_OS;

	/**
	 * Flash based firmware boot BIOS env.
	 */
	if (bfa_ioc_is_bios_optrom(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_BIOS;
	}

	/**
	 * Flash based firmware boot UEFI env.
	 */
	if (bfa_ioc_is_uefi(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_UEFI;
	}

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
}
  1175. static void
  1176. bfa_ioc_timeout(void *ioc_arg)
  1177. {
  1178. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  1179. bfa_trc(ioc, 0);
  1180. bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
  1181. }
/*
 * Write a message into the host-to-firmware mailbox registers and ring
 * the doorbell. @len must not exceed BFI_IOC_MSGLEN_MAX.
 */
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
			      bfa_os_wtole(msgp[i]));

	/* Zero-fill the rest of the mailbox. */
	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
	/* Read back to flush the posted write. */
	(void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
}
/*
 * Build and post an IOC ENABLE request to firmware, stamped with the
 * current time of day (seconds).
 */
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct bfa_timeval_s tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_os_gettimeofday(&tv);
	enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
  1216. static void
  1217. bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
  1218. {
  1219. struct bfi_ioc_ctrl_req_s disable_req;
  1220. bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
  1221. bfa_ioc_portid(ioc));
  1222. bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
  1223. }
  1224. static void
  1225. bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
  1226. {
  1227. struct bfi_ioc_getattr_req_s attr_req;
  1228. bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
  1229. bfa_ioc_portid(ioc));
  1230. bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
  1231. bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
  1232. }
  1233. static void
  1234. bfa_ioc_hb_check(void *cbarg)
  1235. {
  1236. struct bfa_ioc_s *ioc = cbarg;
  1237. u32 hb_count;
  1238. hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
  1239. if (ioc->hb_count == hb_count) {
  1240. printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
  1241. bfa_ioc_recover(ioc);
  1242. return;
  1243. } else {
  1244. ioc->hb_count = hb_count;
  1245. }
  1246. bfa_ioc_mbox_poll(ioc);
  1247. bfa_hb_timer_start(ioc);
  1248. }
/* Begin heartbeat monitoring: sample the counter and start the timer. */
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}
/* Stop heartbeat monitoring. */
static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
	bfa_hb_timer_stop(ioc);
}
/**
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	/* Copy the image word by word, fetching new chunks as needed. */
	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
				      pgnum);
		}
	}

	/* Restore the page register to the base page. */
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
		      bfa_os_swap32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
		      bfa_os_swap32(boot_env));
}
/* Re-run h/w init; @force restarts even if firmware reports running. */
static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	bfa_ioc_hwinit(ioc, force);
}
/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	/* Fix endianness of the attribute fields in place. */
	attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
	attr->card_type = bfa_os_ntohl(attr->card_type);
	attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	/* No handlers registered yet for any message class. */
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32			stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
  1370. /**
  1371. * Cleanup any pending requests.
  1372. */
  1373. static void
  1374. bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
  1375. {
  1376. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1377. struct bfa_mbox_cmd_s *cmd;
  1378. while (!list_empty(&mod->cmd_q))
  1379. bfa_q_deq(&mod->cmd_q, &cmd);
  1380. }
/**
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 *
 * @return BFA_STATUS_OK, or BFA_STATUS_FAILED if the init semaphore
 *	   could not be acquired.
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff, r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
	loff = bfa_ioc_smem_pgoff(ioc, soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		/* Words are stored big-endian in SMEM. */
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = bfa_os_ntohl(r32);
		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	/* Restore the page register to the base page. */
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/**
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 *
 * @return BFA_STATUS_OK, or BFA_STATUS_FAILED if the init semaphore
 *	   could not be acquired.
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
	loff = bfa_ioc_smem_pgoff(ioc, soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}

	/* Restore the page register to the base page. */
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/**
 * hal iocpf to ioc interface
 */

/* IOCPF reports enable completion to the IOC state machine. */
static void
bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}
/* IOCPF reports disable completion to the IOC state machine. */
static void
bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}
/* IOCPF reports a failure to the IOC state machine. */
static void
bfa_ioc_pf_failed(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_FAILED);
}
/*
 * Firmware/driver version mismatch: complete the pending enable with
 * failure status and log a warning.
 */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/**
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
}
/**
 * hal_ioc_public
 */

/* Initialize the ASIC PLL under the chip init semaphore. */
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 * NOTE(review): the return value of bfa_ioc_sem_get() is ignored
	 * here, so on a semaphore timeout the PLL init proceeds anyway --
	 * confirm this is intentional.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;
	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	bfa_os_addr_t	rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
	} else {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
	}

	/* Drop any stale mailbox message before loading firmware. */
	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	/* Module-wide setting shared by all IOC instances. */
	bfa_auto_recover = auto_recover;
}
/* Returns BFA_TRUE if the IOC state machine is in the operational state. */
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
  1571. bfa_boolean_t
  1572. bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
  1573. {
  1574. u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
  1575. return ((r32 != BFI_IOC_UNINIT) &&
  1576. (r32 != BFI_IOC_INITING) &&
  1577. (r32 != BFI_IOC_MEMTEST));
  1578. }
/*
 * Copy an incoming firmware-to-host mailbox message into @mbmsg and
 * acknowledge it (which also clears the mailbox interrupt).
 */
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32	*msgp = mbmsg;
	u32	r32;
	int	i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = bfa_os_htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
	/* Read back to flush the posted write. */
	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}
/*
 * Dispatch an IOC-class firmware message to the appropriate handler or
 * IOCPF state-machine event.
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;
	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		/* Heartbeat is sampled elsewhere; nothing to do here. */
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}
/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	/* Start in uninit and immediately drive the reset transition. */
	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
  1649. /**
  1650. * Driver detach time IOC cleanup.
  1651. */
  1652. void
  1653. bfa_ioc_detach(struct bfa_ioc_s *ioc)
  1654. {
  1655. bfa_fsm_send_event(ioc, IOC_E_DETACH);
  1656. }
  1657. /**
  1658. * Setup IOC PCI properties.
  1659. *
  1660. * @param[in] pcidev PCI device information for this IOC
  1661. */
  1662. void
  1663. bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
  1664. enum bfi_mclass mc)
  1665. {
  1666. ioc->ioc_mc = mc;
  1667. ioc->pcidev = *pcidev;
  1668. ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
  1669. ioc->cna = ioc->ctdev && !ioc->fcmode;
  1670. /**
  1671. * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
  1672. */
  1673. if (ioc->ctdev)
  1674. bfa_ioc_set_ct_hwif(ioc);
  1675. else
  1676. bfa_ioc_set_cb_hwif(ioc);
  1677. bfa_ioc_map_port(ioc);
  1678. bfa_ioc_reg_init(ioc);
  1679. }
  1680. /**
  1681. * Initialize IOC dma memory
  1682. *
  1683. * @param[in] dm_kva kernel virtual address of IOC dma memory
  1684. * @param[in] dm_pa physical address of IOC dma memory
  1685. */
  1686. void
  1687. bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
  1688. {
  1689. /**
  1690. * dma memory for firmware attribute
  1691. */
  1692. ioc->attr_dma.kva = dm_kva;
  1693. ioc->attr_dma.pa = dm_pa;
  1694. ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
  1695. }
  1696. /**
  1697. * Return size of dma memory required.
  1698. */
  1699. u32
  1700. bfa_ioc_meminfo(void)
  1701. {
  1702. return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
  1703. }
/**
 * Enable the IOC: re-arms the one-shot fw-trace save and kicks the FSM.
 */
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
        bfa_ioc_stats(ioc, ioc_enables);
        ioc->dbg_fwsave_once = BFA_TRUE;
        bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
/**
 * Disable the IOC via the state machine.
 */
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
        bfa_ioc_stats(ioc, ioc_disables);
        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
  1717. /**
  1718. * Returns memory required for saving firmware trace in case of crash.
  1719. * Driver must call this interface to allocate memory required for
  1720. * automatic saving of firmware trace. Driver should call
  1721. * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
  1722. * trace memory.
  1723. */
  1724. int
  1725. bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
  1726. {
  1727. return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
  1728. }
  1729. /**
  1730. * Initialize memory for saving firmware trace. Driver must initialize
  1731. * trace memory before call bfa_ioc_enable().
  1732. */
  1733. void
  1734. bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
  1735. {
  1736. ioc->dbg_fwsave = dbg_fwsave;
  1737. ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover);
  1738. }
/**
 * Convert a firmware smem address to its host page number.
 */
u32
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
{
        return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
/**
 * Convert a firmware smem address to its offset within a page.
 * (The ioc parameter is unused; kept for interface symmetry.)
 */
u32
bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
{
        return PSS_SMEM_PGOFF(fmaddr);
}
  1749. /**
  1750. * Register mailbox message handler functions
  1751. *
  1752. * @param[in] ioc IOC instance
  1753. * @param[in] mcfuncs message class handler functions
  1754. */
  1755. void
  1756. bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
  1757. {
  1758. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1759. int mc;
  1760. for (mc = 0; mc < BFI_MC_MAX; mc++)
  1761. mod->mbhdlr[mc].cbfn = mcfuncs[mc];
  1762. }
  1763. /**
  1764. * Register mailbox message handler function, to be called by common modules
  1765. */
  1766. void
  1767. bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
  1768. bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
  1769. {
  1770. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1771. mod->mbhdlr[mc].cbfn = cbfn;
  1772. mod->mbhdlr[mc].cbarg = cbarg;
  1773. }
  1774. /**
  1775. * Queue a mailbox command request to firmware. Waits if mailbox is busy.
  1776. * Responsibility of caller to serialize
  1777. *
  1778. * @param[in] ioc IOC instance
  1779. * @param[i] cmd Mailbox command
  1780. */
  1781. void
  1782. bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
  1783. {
  1784. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1785. u32 stat;
  1786. /**
  1787. * If a previous command is pending, queue new command
  1788. */
  1789. if (!list_empty(&mod->cmd_q)) {
  1790. list_add_tail(&cmd->qe, &mod->cmd_q);
  1791. return;
  1792. }
  1793. /**
  1794. * If mailbox is busy, queue command for poll timer
  1795. */
  1796. stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
  1797. if (stat) {
  1798. list_add_tail(&cmd->qe, &mod->cmd_q);
  1799. return;
  1800. }
  1801. /**
  1802. * mailbox is free -- queue command to firmware
  1803. */
  1804. bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
  1805. }
  1806. /**
  1807. * Handle mailbox interrupts
  1808. */
  1809. void
  1810. bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
  1811. {
  1812. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1813. struct bfi_mbmsg_s m;
  1814. int mc;
  1815. bfa_ioc_msgget(ioc, &m);
  1816. /**
  1817. * Treat IOC message class as special.
  1818. */
  1819. mc = m.mh.msg_class;
  1820. if (mc == BFI_MC_IOC) {
  1821. bfa_ioc_isr(ioc, &m);
  1822. return;
  1823. }
  1824. if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
  1825. return;
  1826. mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
  1827. }
/**
 * Hardware error interrupt entry point: notify the IOC state machine.
 */
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/**
 * Put the IOC in FC mode and derive its port id from the PCI function.
 * Must be called before bfa_ioc_pci_init() for cna computation to see it.
 */
void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
        ioc->fcmode = BFA_TRUE;
        ioc->port_id = bfa_ioc_pcifn(ioc);
}
  1839. /**
  1840. * return true if IOC is disabled
  1841. */
  1842. bfa_boolean_t
  1843. bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
  1844. {
  1845. return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
  1846. bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
  1847. }
  1848. /**
  1849. * return true if IOC firmware is different.
  1850. */
  1851. bfa_boolean_t
  1852. bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
  1853. {
  1854. return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
  1855. bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
  1856. bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
  1857. }
/* True when a firmware-reported IOC state counts as "disabled" */
#define bfa_ioc_state_disabled(__sm) \
        (((__sm) == BFI_IOC_UNINIT) || \
        ((__sm) == BFI_IOC_INITING) || \
        ((__sm) == BFI_IOC_HWINIT) || \
        ((__sm) == BFI_IOC_DISABLED) || \
        ((__sm) == BFI_IOC_FAIL) || \
        ((__sm) == BFI_IOC_CFG_DISABLED))
  1865. /**
  1866. * Check if adapter is disabled -- both IOCs should be in a disabled
  1867. * state.
  1868. */
  1869. bfa_boolean_t
  1870. bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
  1871. {
  1872. u32 ioc_state;
  1873. bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva;
  1874. if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
  1875. return BFA_FALSE;
  1876. ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
  1877. if (!bfa_ioc_state_disabled(ioc_state))
  1878. return BFA_FALSE;
  1879. if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
  1880. ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
  1881. if (!bfa_ioc_state_disabled(ioc_state))
  1882. return BFA_FALSE;
  1883. }
  1884. return BFA_TRUE;
  1885. }
  1886. /**
  1887. * Add to IOC heartbeat failure notification queue. To be used by common
  1888. * modules such as cee, port, diag.
  1889. */
  1890. void
  1891. bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
  1892. struct bfa_ioc_hbfail_notify_s *notify)
  1893. {
  1894. list_add_tail(&notify->qe, &ioc->hb_notify_q);
  1895. }
/* Manufacturer string reported in adapter attributes */
#define BFA_MFG_NAME "Brocade"

/**
 * Fill in @ad_attr with adapter-level attributes gathered from the
 * firmware attribute block (ioc->attr) and IOC bookkeeping state.
 */
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
                         struct bfa_adapter_attr_s *ad_attr)
{
        struct bfi_ioc_attr_s *ioc_attr;

        ioc_attr = ioc->attr;

        bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
        bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
        bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
        bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
        bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
                      sizeof(struct bfa_mfg_vpd_s));

        ad_attr->nports = bfa_ioc_get_nports(ioc);
        ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

        bfa_ioc_get_adapter_model(ioc, ad_attr->model);
        /* For now, model descr uses same model string */
        bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

        ad_attr->card_type = ioc_attr->card_type;
        ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

        if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
                ad_attr->prototype = 1;
        else
                ad_attr->prototype = 0;

        ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
        ad_attr->mac = bfa_ioc_get_mac(ioc);

        ad_attr->pcie_gen = ioc_attr->pcie_gen;
        ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
        ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
        ad_attr->asic_rev = ioc_attr->asic_rev;

        bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

        ad_attr->cna_capable = ioc->cna;
        /* trunking is a multi-port, non-CNA feature */
        ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
}
  1930. enum bfa_ioc_type_e
  1931. bfa_ioc_get_type(struct bfa_ioc_s *ioc)
  1932. {
  1933. if (!ioc->ctdev || ioc->fcmode)
  1934. return BFA_IOC_TYPE_FC;
  1935. else if (ioc->ioc_mc == BFI_MC_IOCFC)
  1936. return BFA_IOC_TYPE_FCoE;
  1937. else if (ioc->ioc_mc == BFI_MC_LL)
  1938. return BFA_IOC_TYPE_LL;
  1939. else {
  1940. bfa_assert(ioc->ioc_mc == BFI_MC_LL);
  1941. return BFA_IOC_TYPE_LL;
  1942. }
  1943. }
/**
 * Copy the adapter serial number from the fw attribute block.
 * (The memset is redundant since the memcpy covers the full length,
 * but it is harmless.)
 */
void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
        bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
        bfa_os_memcpy((void *)serial_num,
                      (void *)ioc->attr->brcd_serialnum,
                      BFA_ADAPTER_SERIAL_NUM_LEN);
}
/**
 * Copy the running firmware version string from the fw attribute block.
 */
void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
        bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
        bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
  1958. void
  1959. bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
  1960. {
  1961. bfa_assert(chip_rev);
  1962. bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
  1963. chip_rev[0] = 'R';
  1964. chip_rev[1] = 'e';
  1965. chip_rev[2] = 'v';
  1966. chip_rev[3] = '-';
  1967. chip_rev[4] = ioc->attr->asic_rev;
  1968. chip_rev[5] = '\0';
  1969. }
/**
 * Copy the option-ROM version string from the fw attribute block.
 */
void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
        bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
        bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
                      BFA_VERSION_LEN);
}
  1977. void
  1978. bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
  1979. {
  1980. bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
  1981. bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
  1982. }
/**
 * Build the adapter model string as "<mfg>-<card_type>", e.g.
 * "Brocade-<n>", into the caller's buffer.
 */
void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
        struct bfi_ioc_attr_s *ioc_attr;

        bfa_assert(model);
        bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

        ioc_attr = ioc->attr;

        /**
         * model name
         */
        bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
                        BFA_MFG_NAME, ioc_attr->card_type);
}
  1996. enum bfa_ioc_state
  1997. bfa_ioc_get_state(struct bfa_ioc_s *ioc)
  1998. {
  1999. enum bfa_iocpf_state iocpf_st;
  2000. enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
  2001. if (ioc_st == BFA_IOC_ENABLING ||
  2002. ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
  2003. iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
  2004. switch (iocpf_st) {
  2005. case BFA_IOCPF_SEMWAIT:
  2006. ioc_st = BFA_IOC_SEMWAIT;
  2007. break;
  2008. case BFA_IOCPF_HWINIT:
  2009. ioc_st = BFA_IOC_HWINIT;
  2010. break;
  2011. case BFA_IOCPF_FWMISMATCH:
  2012. ioc_st = BFA_IOC_FWMISMATCH;
  2013. break;
  2014. case BFA_IOCPF_FAIL:
  2015. ioc_st = BFA_IOC_FAIL;
  2016. break;
  2017. case BFA_IOCPF_INITFAIL:
  2018. ioc_st = BFA_IOC_INITFAIL;
  2019. break;
  2020. default:
  2021. break;
  2022. }
  2023. }
  2024. return ioc_st;
  2025. }
/**
 * Fill in @ioc_attr with the IOC's state, type, PCI identity and the
 * adapter attributes.
 */
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
        bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

        ioc_attr->state = bfa_ioc_get_state(ioc);
        ioc_attr->port_id = ioc->port_id;
        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

        ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
        ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
        bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
  2038. /**
  2039. * hal_wwn_public
  2040. */
  2041. wwn_t
  2042. bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
  2043. {
  2044. return ioc->attr->pwwn;
  2045. }
/* Node WWN from the fw attribute block */
wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
        return ioc->attr->nwwn;
}
/* Adapter id -- the manufacturing port WWN doubles as the unique id */
u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
        return ioc->attr->mfg_pwwn;
}
  2056. mac_t
  2057. bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
  2058. {
  2059. /*
  2060. * Check the IOC type and return the appropriate MAC
  2061. */
  2062. if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
  2063. return ioc->attr->fcoe_mac;
  2064. else
  2065. return ioc->attr->mac;
  2066. }
/* Factory-programmed (manufacturing) port WWN */
wwn_t
bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
{
        return ioc->attr->mfg_pwwn;
}
/* Factory-programmed (manufacturing) node WWN */
wwn_t
bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
{
        return ioc->attr->mfg_nwwn;
}
/**
 * Derive this PCI function's MAC from the manufacturing base MAC.
 * Old card models offset only the last MAC byte; newer models use the
 * carry-propagating increment over the low three bytes.
 */
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
        mac_t m;

        m = ioc->attr->mfg_mac;
        if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
                m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
        else
                bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
                                          bfa_ioc_pcifn(ioc));

        return m;
}
/* True when the IOC operates in FC mode: explicitly set, or a non-CT ASIC */
bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
        return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}
  2094. /**
  2095. * Retrieve saved firmware trace from a prior IOC failure.
  2096. */
  2097. bfa_status_t
  2098. bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
  2099. {
  2100. int tlen;
  2101. if (ioc->dbg_fwsave_len == 0)
  2102. return BFA_STATUS_ENOFSAVE;
  2103. tlen = *trclen;
  2104. if (tlen > ioc->dbg_fwsave_len)
  2105. tlen = ioc->dbg_fwsave_len;
  2106. bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
  2107. *trclen = tlen;
  2108. return BFA_STATUS_OK;
  2109. }
  2110. /**
  2111. * Clear saved firmware trace
  2112. */
  2113. void
  2114. bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
  2115. {
  2116. ioc->dbg_fwsave_once = BFA_TRUE;
  2117. }
  2118. /**
  2119. * Retrieve saved firmware trace from a prior IOC failure.
  2120. */
  2121. bfa_status_t
  2122. bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
  2123. {
  2124. u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
  2125. int tlen;
  2126. bfa_status_t status;
  2127. bfa_trc(ioc, *trclen);
  2128. tlen = *trclen;
  2129. if (tlen > BFA_DBG_FWTRC_LEN)
  2130. tlen = BFA_DBG_FWTRC_LEN;
  2131. status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
  2132. *trclen = tlen;
  2133. return status;
  2134. }
/**
 * Send a DBG_SYNC mailbox command asking firmware to flush its trace
 * state into smem. Fire-and-forget; there is no response message.
 */
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
        struct bfa_mbox_cmd_s cmd;
        struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

        bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
                    bfa_ioc_portid(ioc));
        req->ioc_class = ioc->ioc_mc;
        bfa_ioc_mbox_queue(ioc, &cmd);
}
/**
 * Send a fw sync and busy-wait (bounded) for the mailbox to drain.
 */
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
        u32 fwsync_iter = 1000;

        bfa_ioc_send_fwsync(ioc);

        /**
         * After sending a fw sync mbox command wait for it to
         * take effect.  We will not wait for a response because
         *    1. fw_sync mbox cmd doesn't have a response.
         *    2. Even if we implement that, interrupts might not
         *       be enabled when we call this function.
         * So, just keep checking if any mbox cmd is pending, and
         * after waiting for a reasonable amount of time, go ahead.
         * It is possible that fw has crashed and the mbox command
         * is never acknowledged.
         */
        while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
                fwsync_iter--;
}
  2164. /**
  2165. * Dump firmware smem
  2166. */
  2167. bfa_status_t
  2168. bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
  2169. u32 *offset, int *buflen)
  2170. {
  2171. u32 loff;
  2172. int dlen;
  2173. bfa_status_t status;
  2174. u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
  2175. if (*offset >= smem_len) {
  2176. *offset = *buflen = 0;
  2177. return BFA_STATUS_EINVAL;
  2178. }
  2179. loff = *offset;
  2180. dlen = *buflen;
  2181. /**
  2182. * First smem read, sync smem before proceeding
  2183. * No need to sync before reading every chunk.
  2184. */
  2185. if (loff == 0)
  2186. bfa_ioc_fwsync(ioc);
  2187. if ((loff + dlen) >= smem_len)
  2188. dlen = smem_len - loff;
  2189. status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
  2190. if (status != BFA_STATUS_OK) {
  2191. *offset = *buflen = 0;
  2192. return status;
  2193. }
  2194. *offset += dlen;
  2195. if (*offset >= smem_len)
  2196. *offset = 0;
  2197. *buflen = dlen;
  2198. return status;
  2199. }
  2200. /**
  2201. * Firmware statistics
  2202. */
  2203. bfa_status_t
  2204. bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
  2205. {
  2206. u32 loff = BFI_IOC_FWSTATS_OFF + \
  2207. BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
  2208. int tlen;
  2209. bfa_status_t status;
  2210. if (ioc->stats_busy) {
  2211. bfa_trc(ioc, ioc->stats_busy);
  2212. return BFA_STATUS_DEVBUSY;
  2213. }
  2214. ioc->stats_busy = BFA_TRUE;
  2215. tlen = sizeof(struct bfa_fw_stats_s);
  2216. status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
  2217. ioc->stats_busy = BFA_FALSE;
  2218. return status;
  2219. }
/**
 * Zero this port's firmware statistics block in smem.  Uses the same
 * stats_busy single-caller guard as bfa_ioc_fw_stats_get().
 */
bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
        u32 loff = BFI_IOC_FWSTATS_OFF + \
                BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
        int tlen;
        bfa_status_t status;

        if (ioc->stats_busy) {
                bfa_trc(ioc, ioc->stats_busy);
                return BFA_STATUS_DEVBUSY;
        }
        ioc->stats_busy = BFA_TRUE;

        tlen = sizeof(struct bfa_fw_stats_s);
        status = bfa_ioc_smem_clr(ioc, loff, tlen);

        ioc->stats_busy = BFA_FALSE;
        return status;
}
  2237. /**
  2238. * Save firmware trace if configured.
  2239. */
  2240. static void
  2241. bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
  2242. {
  2243. int tlen;
  2244. if (ioc->dbg_fwsave_len) {
  2245. tlen = ioc->dbg_fwsave_len;
  2246. bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
  2247. }
  2248. }
  2249. /**
  2250. * Firmware failure detected. Start recovery actions.
  2251. */
  2252. static void
  2253. bfa_ioc_recover(struct bfa_ioc_s *ioc)
  2254. {
  2255. if (ioc->dbg_fwsave_once) {
  2256. ioc->dbg_fwsave_once = BFA_FALSE;
  2257. bfa_ioc_debug_save(ioc);
  2258. }
  2259. bfa_ioc_stats(ioc, ioc_hbfails);
  2260. bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
  2261. }
/*
 * NOTE(review): currently a no-op -- both the LL and non-LL paths fall
 * through without doing anything.  Presumably a WWN-validation body was
 * removed or is yet to be added; verify intent.
 */
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
        if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
                return;
}
  2268. /**
  2269. * hal_iocpf_pvt BFA IOC PF private functions
  2270. */
  2271. static void
  2272. bfa_iocpf_enable(struct bfa_ioc_s *ioc)
  2273. {
  2274. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
  2275. }
/* Forward the disable request to the IOCPF sub-state machine */
static void
bfa_iocpf_disable(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}
/* Notify the IOCPF sub-FSM of an IOC failure */
static void
bfa_iocpf_fail(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}
/* Notify the IOCPF sub-FSM of an initialization failure */
static void
bfa_iocpf_initfail(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}
/* Notify the IOCPF sub-FSM that the getattr request failed */
static void
bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}
/* Forward the stop request to the IOCPF sub-state machine */
static void
bfa_iocpf_stop(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}
/* IOCPF timer callback: deliver a TIMEOUT event to the sub-FSM */
static void
bfa_iocpf_timeout(void *ioc_arg)
{
        struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

        bfa_trc(ioc, 0);
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
/* Hw-semaphore poll timer callback: retry acquiring the semaphore */
static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
        struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

        bfa_ioc_hw_sem_get(ioc);
}
  2314. /**
  2315. * bfa timer function
  2316. */
  2317. void
  2318. bfa_timer_init(struct bfa_timer_mod_s *mod)
  2319. {
  2320. INIT_LIST_HEAD(&mod->timer_q);
  2321. }
/**
 * Periodic tick: age every active timer by BFA_TIMER_FREQ.  Expired
 * timers are first moved to a local list, then their callbacks are run
 * -- a two-phase scheme so a callback may safely re-arm or stop timers
 * without corrupting the iteration.
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
        struct list_head *qh = &mod->timer_q;
        struct list_head *qe, *qe_next;
        struct bfa_timer_s *elem;
        struct list_head timedout_q;

        INIT_LIST_HEAD(&timedout_q);

        qe = bfa_q_next(qh);

        while (qe != qh) {
                /* capture next before a possible list_del of qe */
                qe_next = bfa_q_next(qe);

                elem = (struct bfa_timer_s *) qe;
                if (elem->timeout <= BFA_TIMER_FREQ) {
                        elem->timeout = 0;
                        list_del(&elem->qe);
                        list_add_tail(&elem->qe, &timedout_q);
                } else {
                        elem->timeout -= BFA_TIMER_FREQ;
                }

                qe = qe_next;   /* go to next elem */
        }

        /*
         * Pop all the timeout entries
         */
        while (!list_empty(&timedout_q)) {
                bfa_q_deq(&timedout_q, &elem);
                elem->timercb(elem->arg);
        }
}
  2351. /**
  2352. * Should be called with lock protection
  2353. */
  2354. void
  2355. bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
  2356. void (*timercb) (void *), void *arg, unsigned int timeout)
  2357. {
  2358. bfa_assert(timercb != NULL);
  2359. bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
  2360. timer->timeout = timeout;
  2361. timer->timercb = timercb;
  2362. timer->arg = arg;
  2363. list_add_tail(&timer->qe, &mod->timer_q);
  2364. }
  2365. /**
  2366. * Should be called with lock protection
  2367. */
  2368. void
  2369. bfa_timer_stop(struct bfa_timer_s *timer)
  2370. {
  2371. bfa_assert(!list_empty(&timer->qe));
  2372. list_del(&timer->qe);
  2373. }