bfa_ioc.c 73 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341
  1. /*
  2. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include "bfad_drv.h"
  18. #include "bfa_ioc.h"
  19. #include "bfi_reg.h"
  20. #include "bfa_defs.h"
  21. #include "bfa_defs_svc.h"
  22. BFA_TRC_FILE(CNA, IOC);
  23. /*
  24. * IOC local definitions
  25. */
  26. #define BFA_IOC_TOV 3000 /* msecs */
  27. #define BFA_IOC_HWSEM_TOV 500 /* msecs */
  28. #define BFA_IOC_HB_TOV 500 /* msecs */
  29. #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
  30. #define BFA_IOC_POLL_TOV BFA_TIMER_FREQ
  31. #define bfa_ioc_timer_start(__ioc) \
  32. bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
  33. bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
  34. #define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
  35. #define bfa_hb_timer_start(__ioc) \
  36. bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
  37. bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
  38. #define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)
  39. #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
  40. /*
  41. * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
  42. */
  43. #define bfa_ioc_firmware_lock(__ioc) \
  44. ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
  45. #define bfa_ioc_firmware_unlock(__ioc) \
  46. ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
  47. #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
  48. #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
  49. #define bfa_ioc_notify_fail(__ioc) \
  50. ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
  51. #define bfa_ioc_sync_start(__ioc) \
  52. ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
  53. #define bfa_ioc_sync_join(__ioc) \
  54. ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
  55. #define bfa_ioc_sync_leave(__ioc) \
  56. ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
  57. #define bfa_ioc_sync_ack(__ioc) \
  58. ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
  59. #define bfa_ioc_sync_complete(__ioc) \
  60. ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
  61. #define bfa_ioc_mbox_cmd_pending(__ioc) \
  62. (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
  63. readl((__ioc)->ioc_regs.hfn_mbox_cmd))
  64. bfa_boolean_t bfa_auto_recover = BFA_TRUE;
  65. /*
  66. * forward declarations
  67. */
  68. static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
  69. static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
  70. static void bfa_ioc_timeout(void *ioc);
  71. static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
  72. static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
  73. static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
  74. static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
  75. static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
  76. static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
  77. static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
  78. static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
  79. static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
  80. static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
  81. enum bfa_ioc_event_e event);
  82. static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
  83. static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
  84. static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
  85. static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
  86. static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
  87. /*
  88. * IOC state machine definitions/declarations
  89. */
/*
 * Events processed by the IOC state machine (bfa_ioc_sm_*).
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
	IOC_E_HWFAILED		= 12,	/* PCI mapping failure notice	*/
};
  104. bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
  105. bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
  106. bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
  107. bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
  108. bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
  109. bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
  110. bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
  111. bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
  112. bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
  113. bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
/*
 * Maps each IOC state-machine handler to the externally visible
 * bfa_ioc_state value reported to users of the IOC.
 */
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
  126. /*
  127. * IOCPF state machine definitions/declarations
  128. */
  129. #define bfa_iocpf_timer_start(__ioc) \
  130. bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
  131. bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
  132. #define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)
  133. #define bfa_iocpf_poll_timer_start(__ioc) \
  134. bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
  135. bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
  136. #define bfa_sem_timer_start(__ioc) \
  137. bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
  138. bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
  139. #define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)
  140. /*
  141. * Forward declareations for iocpf state machine
  142. */
  143. static void bfa_iocpf_timeout(void *ioc_arg);
  144. static void bfa_iocpf_sem_timeout(void *ioc_arg);
  145. static void bfa_iocpf_poll_timeout(void *ioc_arg);
  146. /*
  147. * IOCPF state machine events
  148. */
/*
 * Events processed by the IOCPF state machine (bfa_iocpf_sm_*).
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
	IOCPF_E_SEM_ERROR	= 12,	/* h/w sem mapping error	*/
};
  163. /*
  164. * IOCPF states
  165. */
/*
 * Externally visible IOCPF states (reported via iocpf_sm_table).
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized	*/
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized		*/
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed			*/
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed			*/
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};
  177. bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
  178. bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
  179. bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
  180. bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
  181. bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
  182. bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
  183. bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
  184. bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
  185. enum iocpf_event);
  186. bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
  187. bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
  188. bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
  189. bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
  190. bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
  191. enum iocpf_event);
  192. bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
/*
 * Maps each IOCPF state-machine handler to the externally visible
 * bfa_iocpf_state value. Several internal states (e.g. the *_sync
 * variants) intentionally map to the same external state.
 */
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
  209. /*
  210. * IOC State Machine
  211. */
  212. /*
  213. * Beginning state. IOC uninit state.
  214. */
  215. static void
  216. bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
  217. {
  218. }
  219. /*
  220. * IOC is in uninit state.
  221. */
  222. static void
  223. bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
  224. {
  225. bfa_trc(ioc, event);
  226. switch (event) {
  227. case IOC_E_RESET:
  228. bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
  229. break;
  230. default:
  231. bfa_sm_fault(ioc, event);
  232. }
  233. }
  234. /*
  235. * Reset entry actions -- initialize state machine
  236. */
  237. static void
  238. bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
  239. {
  240. bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
  241. }
  242. /*
  243. * IOC is in reset state.
  244. */
  245. static void
  246. bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
  247. {
  248. bfa_trc(ioc, event);
  249. switch (event) {
  250. case IOC_E_ENABLE:
  251. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  252. break;
  253. case IOC_E_DISABLE:
  254. bfa_ioc_disable_comp(ioc);
  255. break;
  256. case IOC_E_DETACH:
  257. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  258. break;
  259. default:
  260. bfa_sm_fault(ioc, event);
  261. }
  262. }
  263. static void
  264. bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
  265. {
  266. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
  267. }
  268. /*
  269. * Host IOC function is being enabled, awaiting response from firmware.
  270. * Semaphore is acquired.
  271. */
  272. static void
  273. bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
  274. {
  275. bfa_trc(ioc, event);
  276. switch (event) {
  277. case IOC_E_ENABLED:
  278. bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
  279. break;
  280. case IOC_E_PFFAILED:
  281. /* !!! fall through !!! */
  282. case IOC_E_HWERROR:
  283. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  284. bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
  285. if (event != IOC_E_PFFAILED)
  286. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
  287. break;
  288. case IOC_E_HWFAILED:
  289. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  290. bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
  291. break;
  292. case IOC_E_DISABLE:
  293. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  294. break;
  295. case IOC_E_DETACH:
  296. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  297. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  298. break;
  299. case IOC_E_ENABLE:
  300. break;
  301. default:
  302. bfa_sm_fault(ioc, event);
  303. }
  304. }
  305. static void
  306. bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
  307. {
  308. bfa_ioc_timer_start(ioc);
  309. bfa_ioc_send_getattr(ioc);
  310. }
  311. /*
  312. * IOC configuration in progress. Timer is active.
  313. */
  314. static void
  315. bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
  316. {
  317. bfa_trc(ioc, event);
  318. switch (event) {
  319. case IOC_E_FWRSP_GETATTR:
  320. bfa_ioc_timer_stop(ioc);
  321. bfa_ioc_check_attr_wwns(ioc);
  322. bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
  323. break;
  324. break;
  325. case IOC_E_PFFAILED:
  326. case IOC_E_HWERROR:
  327. bfa_ioc_timer_stop(ioc);
  328. /* !!! fall through !!! */
  329. case IOC_E_TIMEOUT:
  330. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  331. bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
  332. if (event != IOC_E_PFFAILED)
  333. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
  334. break;
  335. case IOC_E_DISABLE:
  336. bfa_ioc_timer_stop(ioc);
  337. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  338. break;
  339. case IOC_E_ENABLE:
  340. break;
  341. default:
  342. bfa_sm_fault(ioc, event);
  343. }
  344. }
  345. static void
  346. bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
  347. {
  348. struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
  349. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
  350. bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
  351. bfa_ioc_hb_monitor(ioc);
  352. BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
  353. }
  354. static void
  355. bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
  356. {
  357. bfa_trc(ioc, event);
  358. switch (event) {
  359. case IOC_E_ENABLE:
  360. break;
  361. case IOC_E_DISABLE:
  362. bfa_hb_timer_stop(ioc);
  363. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  364. break;
  365. case IOC_E_PFFAILED:
  366. case IOC_E_HWERROR:
  367. bfa_hb_timer_stop(ioc);
  368. /* !!! fall through !!! */
  369. case IOC_E_HBFAIL:
  370. if (ioc->iocpf.auto_recover)
  371. bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
  372. else
  373. bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
  374. bfa_ioc_fail_notify(ioc);
  375. if (event != IOC_E_PFFAILED)
  376. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
  377. break;
  378. default:
  379. bfa_sm_fault(ioc, event);
  380. }
  381. }
  382. static void
  383. bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
  384. {
  385. struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
  386. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
  387. BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
  388. }
  389. /*
  390. * IOC is being disabled
  391. */
  392. static void
  393. bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
  394. {
  395. bfa_trc(ioc, event);
  396. switch (event) {
  397. case IOC_E_DISABLED:
  398. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
  399. break;
  400. case IOC_E_HWERROR:
  401. /*
  402. * No state change. Will move to disabled state
  403. * after iocpf sm completes failure processing and
  404. * moves to disabled state.
  405. */
  406. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
  407. break;
  408. case IOC_E_HWFAILED:
  409. bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
  410. bfa_ioc_disable_comp(ioc);
  411. break;
  412. default:
  413. bfa_sm_fault(ioc, event);
  414. }
  415. }
  416. /*
  417. * IOC disable completion entry.
  418. */
  419. static void
  420. bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
  421. {
  422. bfa_ioc_disable_comp(ioc);
  423. }
  424. static void
  425. bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
  426. {
  427. bfa_trc(ioc, event);
  428. switch (event) {
  429. case IOC_E_ENABLE:
  430. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  431. break;
  432. case IOC_E_DISABLE:
  433. ioc->cbfn->disable_cbfn(ioc->bfa);
  434. break;
  435. case IOC_E_DETACH:
  436. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  437. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  438. break;
  439. default:
  440. bfa_sm_fault(ioc, event);
  441. }
  442. }
  443. static void
  444. bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
  445. {
  446. bfa_trc(ioc, 0);
  447. }
  448. /*
  449. * Hardware initialization retry.
  450. */
  451. static void
  452. bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
  453. {
  454. bfa_trc(ioc, event);
  455. switch (event) {
  456. case IOC_E_ENABLED:
  457. bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
  458. break;
  459. case IOC_E_PFFAILED:
  460. case IOC_E_HWERROR:
  461. /*
  462. * Initialization retry failed.
  463. */
  464. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  465. bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
  466. if (event != IOC_E_PFFAILED)
  467. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
  468. break;
  469. case IOC_E_HWFAILED:
  470. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  471. bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
  472. break;
  473. case IOC_E_ENABLE:
  474. break;
  475. case IOC_E_DISABLE:
  476. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  477. break;
  478. case IOC_E_DETACH:
  479. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  480. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  481. break;
  482. default:
  483. bfa_sm_fault(ioc, event);
  484. }
  485. }
  486. static void
  487. bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
  488. {
  489. bfa_trc(ioc, 0);
  490. }
  491. /*
  492. * IOC failure.
  493. */
  494. static void
  495. bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
  496. {
  497. bfa_trc(ioc, event);
  498. switch (event) {
  499. case IOC_E_ENABLE:
  500. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  501. break;
  502. case IOC_E_DISABLE:
  503. bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
  504. break;
  505. case IOC_E_DETACH:
  506. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  507. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
  508. break;
  509. case IOC_E_HWERROR:
  510. /*
  511. * HB failure notification, ignore.
  512. */
  513. break;
  514. default:
  515. bfa_sm_fault(ioc, event);
  516. }
  517. }
/* Entry action for the unrecoverable hardware-failure state: trace only. */
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Terminal hardware-failure state. No recovery is attempted; enable and
 * disable requests are completed directly via the driver callbacks.
 */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		/* Nothing to shut down; just acknowledge the disable. */
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		/* No IOCPF_E_STOP here: the PF is already dead. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	/* Mismatch has not yet been reported to the user for this cycle. */
	iocpf->fw_mismatch_notified = BFA_FALSE;
	/* Latch the module-wide auto-recovery policy for this PF. */
	iocpf->auto_recover = bfa_auto_recover;
}
/*
 * Beginning state. IOC is in reset state.
 * Only an enable request moves the PF forward (to firmware check).
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		/* Already in reset; nothing to do. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Semaphore should be acquired for version check.
 * Entry action: if the currently running firmware is a non-normal boot
 * (e.g. memtest left running), force it back to UNINIT and pump the h/w
 * semaphore once so a stale holder cannot wedge the sem-get below.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);

	/* h/w sem init */
	if (fwstate == BFI_IOC_UNINIT)
		goto sem_get;

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
		goto sem_get;

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, fwhdr.exec);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Awaiting h/w semaphore to continue with version check.
 * Once locked: if this PF wins the firmware lock and sync can start,
 * proceed to h/w init; otherwise release the semaphore and either retry
 * (sync not ready) or report a firmware mismatch (lock lost).
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				/* Keep the semaphore; hwinit releases it. */
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				/* Not ready; release sem and poll again. */
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			/* Running firmware doesn't match this driver. */
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Notify enable completion callback.
 * Entry action for the fw-mismatch state: report the mismatch to the
 * driver once per enable cycle, then start the retry timer.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}
/*
 * Awaiting firmware version match.
 * On timer expiry, go back to fwcheck and re-examine the running
 * firmware (the other function may have replaced it by then).
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Request for semaphore.
 * Entry action: kick off the (possibly polled) h/w semaphore acquisition.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Awaiting semaphore for h/w initialzation.
 * When locked: if the inter-function sync is complete, join it and start
 * h/w init (semaphore kept); otherwise release the semaphore and retry.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			/* Other function still initializing; poll again. */
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action: reset the fw-init poll counter and start h/w init. */
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	/* BFA_FALSE: do not force re-init if valid firmware is running. */
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}
/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		/* Firmware came up; go send the IOC enable command. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		/* Firmware never became ready; release sem and fail init. */
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Entry action: arm the response timer and send the IOC ENABLE request
 * to firmware.
 */
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);

	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}
/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		/* Enable completed: release the sem and declare ready. */
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		/* PFFAILED only for a real timeout; INITFAIL was already
		 * signalled by the originator of that event. */
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action: tell the IOC state machine that the PF is enabled. */
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}
/*
 * PF is up and operational; react to disable requests and failures
 * reported by the IOC layer.
 */
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		/* Attribute fetch failed: treat like an init failure. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action: arm the response timer and send IOC DISABLE to firmware. */
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}
/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		/* Firmware didn't ack the disable; force-fail it. */
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		/* Stale enable response racing the disable; ignore it. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action: grab the h/w semaphore so we can leave the sync group. */
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * IOC hb ack request is being removed.
 * Waits for the h/w semaphore, then leaves the inter-function sync
 * group and completes the disable.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		/* Already on the way down; nothing extra to do. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC disable completion entry.
 * Flush queued mailbox commands and notify the IOC state machine.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}
/*
 * PF is disabled. An enable restarts the semaphore wait; a stop releases
 * the firmware lock and returns to reset.
 */
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Entry action: save the firmware trace for debugging, then acquire the
 * h/w semaphore so the failure can be propagated safely.
 */
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Hardware initialization failed.
 * Once the semaphore is held, mark the failure in h/w, leave the sync
 * group, and settle into the initfail state.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		/* Already failing; ignore the duplicate notification. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action for the init-failed state: trace only. */
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}
  952. /*
  953. * Hardware initialization failed.
  954. */
  955. static void
  956. bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
  957. {
  958. struct bfa_ioc_s *ioc = iocpf->ioc;
  959. bfa_trc(ioc, event);
  960. switch (event) {
  961. case IOCPF_E_DISABLE:
  962. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
  963. break;
  964. case IOCPF_E_STOP:
  965. bfa_ioc_firmware_unlock(ioc);
  966. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
  967. break;
  968. default:
  969. bfa_sm_fault(ioc, event);
  970. }
  971. }
/*
 * Entry action after a runtime failure: stop the firmware processor,
 * drop pending mailbox traffic, and acquire the semaphore so failure
 * state can be synchronized across functions.
 */
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/*
 * Synchronize a runtime failure with the other function. With
 * auto-recovery enabled, attempt re-initialization; otherwise record the
 * failure in hardware and park in the failed state.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			/* No recovery: mark h/w failed, release sem, park. */
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			/* Recover: re-init now if sync done, else wait. */
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		/* Already handling a failure; ignore the duplicate. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/* Entry action for the PF failed state: trace only. */
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}
/*
 * IOC is in failed state.
 * Only a disable request is accepted here.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 * Walks ioc->notify_q and invokes each registered callback with @event.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		/* The notify struct is the first member of the queue entry. */
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}
/*
 * Complete an IOC disable: invoke the driver callback, then fan out the
 * DISABLED event to all registered notification clients.
 */
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
  1065. bfa_boolean_t
  1066. bfa_ioc_sem_get(void __iomem *sem_reg)
  1067. {
  1068. u32 r32;
  1069. int cnt = 0;
  1070. #define BFA_SEM_SPINCNT 3000
  1071. r32 = readl(sem_reg);
  1072. while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
  1073. cnt++;
  1074. udelay(2);
  1075. r32 = readl(sem_reg);
  1076. }
  1077. if (!(r32 & 1))
  1078. return BFA_TRUE;
  1079. return BFA_FALSE;
  1080. }
/*
 * Try to acquire the IOC h/w semaphore once; on success deliver
 * IOCPF_E_SEMLOCKED, on PCI error deliver IOCPF_E_SEM_ERROR, otherwise
 * start the semaphore retry timer.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		/* All-ones read: the PCI device has dropped off the bus. */
		WARN_ON(r32 == ~0);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		/* Busy bit clear: we own the semaphore now. */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	/* Semaphore held elsewhere; retry after the timer fires. */
	bfa_sem_timer_start(ioc);
}
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int		i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	/* Clear the init-enable/done bits now that SRAM is ready. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/* Release LPU0 from reset so the firmware processor starts running. */
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/* Hold both LPU processors in reset, stopping firmware execution. */
static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/*
 * Get driver and firmware versions.
 * Reads the firmware image header out of shared memory (SMEM) word by
 * word into @fwhdr.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int		i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	/* Header is small enough to fit in one SMEM page; no wrap handling. */
	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
/*
 * Returns TRUE if same.
 * Compares the md5sum of the running firmware header against the
 * driver's embedded firmware image, tracing the first differing byte.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	/* SMEM header fields are big-endian; swab before comparing. */
	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	/* Signature and env match; final check is the md5 comparison. */
	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/*
 * Conditionally flush any pending message from firmware at start.
 * Acking a stale LPU->host mailbox message here ensures subsequent
 * MSI-X interrupts are delivered.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
/*
 * Bring the IOC hardware up. Depending on the firmware state found in
 * the scratch register (and @force), either boot fresh firmware, wait
 * for an in-progress init by the other function, or simply re-attach to
 * already-running firmware.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		/* No usable firmware: download and boot ours. */
		bfa_ioc_boot(ioc, boot_type, boot_env);
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
	bfa_ioc_poll_fwinit(ioc);
}
  1286. static void
  1287. bfa_ioc_timeout(void *ioc_arg)
  1288. {
  1289. struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
  1290. bfa_trc(ioc, 0);
  1291. bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
  1292. }
/*
 * Copy a host-to-firmware message into the mailbox registers and ring
 * the LPU doorbell. @len is in bytes and must not exceed
 * BFI_IOC_MSGLEN_MAX; the remainder of the mailbox is zero-filled.
 */
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/* Zero out the unused tail of the mailbox. */
	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	/* Read back to flush the posted write before returning. */
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
  1315. static void
  1316. bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
  1317. {
  1318. struct bfi_ioc_ctrl_req_s enable_req;
  1319. struct timeval tv;
  1320. bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
  1321. bfa_ioc_portid(ioc));
  1322. enable_req.clscode = cpu_to_be16(ioc->clscode);
  1323. do_gettimeofday(&tv);
  1324. enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
  1325. bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
  1326. }
/* Build and send the IOC DISABLE request to firmware. */
static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
/*
 * Ask firmware for IOC attributes; the response is DMAed into the
 * attr buffer whose physical address is passed in the request.
 */
static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s	attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
  1344. static void
  1345. bfa_ioc_hb_check(void *cbarg)
  1346. {
  1347. struct bfa_ioc_s *ioc = cbarg;
  1348. u32 hb_count;
  1349. hb_count = readl(ioc->ioc_regs.heartbeat);
  1350. if (ioc->hb_count == hb_count) {
  1351. bfa_ioc_recover(ioc);
  1352. return;
  1353. } else {
  1354. ioc->hb_count = hb_count;
  1355. }
  1356. bfa_ioc_mbox_poll(ioc);
  1357. bfa_hb_timer_start(ioc);
  1358. }
/* Begin heartbeat monitoring: snapshot the counter and arm the timer. */
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}
/*
 * Initiate a full firmware download.
 * Copies the embedded firmware image chunk by chunk into shared memory
 * (handling SMEM page wrap-around), then writes the ASIC mode, boot
 * type and boot environment words that the bootloader reads at startup.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
		/* Fetch the next image chunk when we cross a boundary. */
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page register to page 0. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and device mode at the end.
	 */
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				      ioc->port0_mode, ioc->port1_mode);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
		      swab32(asicmode));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
		      swab32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
		      swab32(boot_env));
}
/*
 * Update BFA configuration from firmware configuration.
 * Byte-swaps the DMAed attribute fields in place, derives the FC-mode
 * flag, and completes the getattr step of the enable sequence.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
	attr->card_type     = be32_to_cpu(attr->card_type);
	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/*
 * Attach time initialization of mbox logic.
 * Empties the command queue and clears all per-message-class handlers.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
  1449. /*
  1450. * Mbox poll timer -- restarts any pending mailbox requests.
  1451. */
  1452. static void
  1453. bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
  1454. {
  1455. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1456. struct bfa_mbox_cmd_s *cmd;
  1457. u32 stat;
  1458. /*
  1459. * If no command pending, do nothing
  1460. */
  1461. if (list_empty(&mod->cmd_q))
  1462. return;
  1463. /*
  1464. * If previous command is not yet fetched by firmware, do nothing
  1465. */
  1466. stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
  1467. if (stat)
  1468. return;
  1469. /*
  1470. * Enqueue command to firmware.
  1471. */
  1472. bfa_q_deq(&mod->cmd_q, &cmd);
  1473. bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
  1474. }
/*
 * Cleanup any pending requests.
 * Dequeues every command still waiting on the mailbox queue; the
 * commands are abandoned, not completed.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 *
 * Returns BFA_STATUS_OK, or BFA_STATUS_FAILED if the init semaphore
 * could not be obtained. Words are converted from big-endian SMEM order
 * to host order.
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page register to page 0. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 *
 * Returns BFA_STATUS_OK, or BFA_STATUS_FAILED if the init semaphore
 * could not be obtained.
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page register to page 0. */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * IOC failure handler: notify the driver and registered modules, save the
 * firmware trace (once), and log a critical heartbeat-failure message.
 */
static void
bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);

	/* Capture the firmware trace before state is lost. */
	bfa_ioc_debug_save_ftrc(ioc);

	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
		"Heart Beat of IOC has failed\n");
}
/*
 * Firmware/driver version mismatch detected: complete the pending enable
 * with a failure status and log a warning.
 */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
}
/*
 * Initialize the chip PLL under the init semaphore and mark the IOC
 * as PLL-initialized. Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 * NOTE(review): the return value of bfa_ioc_sem_get() is ignored
	 * here, unlike in bfa_ioc_smem_read()/_clr() — presumably the
	 * semaphore is always obtainable at pll-init time; verify.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	/*
	 * release semaphore.
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
  1629. /*
  1630. * Interface used by diag module to do firmware boot with memory test
  1631. * as the entry vector.
  1632. */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 * Both this function's and the alternate function's fwstate
	 * registers are set to the same initial state.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
	} else {
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
	}

	/* Flush stale mailbox messages, load firmware, then start the LPU. */
	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	bfa_ioc_lpu_start(ioc);
}
  1653. /*
  1654. * Enable/disable IOC failure auto recovery.
  1655. */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	/* Module-wide setting; applies to all IOC instances. */
	bfa_auto_recover = auto_recover;
}
/*
 * Return true if the IOC state machine is in the operational state.
 */
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
/*
 * Return true if the firmware state register shows the IOC past its
 * pre-init states (uninit/initing/memtest).
 */
bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);

	return ((r32 != BFI_IOC_UNINIT) &&
		(r32 != BFI_IOC_INITING) &&
		(r32 != BFI_IOC_MEMTEST));
}
/*
 * Fetch one firmware-to-host mailbox message, if present.
 *
 * @param[in]  ioc    IOC instance
 * @param[out] mbmsg  buffer large enough for union bfi_ioc_i2h_msg_u
 *
 * Returns BFA_TRUE and fills @mbmsg when a message was pending,
 * BFA_FALSE when the mailbox was empty.
 */
bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32	*msgp = mbmsg;
	u32	r32;
	int	i;

	/* Bit 0 of the command register signals a pending message. */
	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return BFA_FALSE;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			    i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);	/* flush the write */

	return BFA_TRUE;
}
/*
 * Handle a firmware-to-host message of class BFI_MC_IOC: heartbeats,
 * enable/disable replies and attribute-query replies. Forwards PF events
 * to the IOCPF state machine. Unknown message ids are traced and warned.
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		/* Heartbeat only proves liveness; nothing to do. */
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		/* Latch the firmware-reported port mode and capabilities. */
		ioc->port_mode = ioc->port_mode_cfg =
				 (enum bfa_mode_s)msg->fw_event.port_mode;
		ioc->ad_cap_bm = msg->fw_event.cap_bm;
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}
  1726. /*
  1727. * IOC attach time initialization and setup.
  1728. *
  1729. * @param[in] ioc memory for IOC
  1730. * @param[in] bfa driver instance structure
  1731. */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;	/* allow one fwtrc save on first failure */
	ioc->iocpf.ioc = ioc;			/* back-pointer for the PF state machine */

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	/* Start the IOC state machine in uninit and drive it to reset. */
	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
  1748. /*
  1749. * Driver detach time IOC cleanup.
  1750. */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	/* Cleanup is performed by the state machine's DETACH handling. */
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
  1756. /*
  1757. * Setup IOC PCI properties.
  1758. *
  1759. * @param[in] pcidev PCI device information for this IOC
  1760. */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_pcifn_class clscode)
{
	ioc->clscode	= clscode;
	ioc->pcidev	= *pcidev;	/* struct copy */

	/*
	 * Initialize IOC and device personality.
	 * Defaults (FC) may be overridden per device id below.
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode  = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case BFA_PCI_DEVICE_ID_FC_8G1P:
	case BFA_PCI_DEVICE_ID_FC_8G2P:
		/* Crossbow (CB) FC HBAs. */
		ioc->asic_gen = BFI_ASIC_GEN_CB;
		ioc->fcmode = BFA_TRUE;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
		ioc->ad_cap_bm = BFA_CM_HBA;
		break;

	case BFA_PCI_DEVICE_ID_CT:
		/* Catapult (CT) CNA: ethernet ports. */
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;
		break;

	case BFA_PCI_DEVICE_ID_CT_FC:
		/* Catapult in FC personality. */
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->fcmode = BFA_TRUE;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
		ioc->ad_cap_bm = BFA_CM_HBA;
		break;

	case BFA_PCI_DEVICE_ID_CT2:
		/* CT2: personality depends on class code and subsystem id. */
		ioc->asic_gen = BFI_ASIC_GEN_CT2;
		if (clscode == BFI_PCIFN_CLASS_FC &&
		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
			ioc->fcmode = BFA_TRUE;
			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
			ioc->ad_cap_bm = BFA_CM_HBA;
		} else {
			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_CNA;
				ioc->ad_cap_bm = BFA_CM_CNA;
			} else {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_NIC;
				ioc->ad_cap_bm = BFA_CM_NIC;
			}
		}
		break;

	default:
		WARN_ON(1);
	}

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
		bfa_ioc_set_cb_hwif(ioc);
	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
		bfa_ioc_set_ct_hwif(ioc);
	else {
		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
		bfa_ioc_set_ct2_hwif(ioc);
		bfa_ioc_ct2_poweron(ioc);	/* CT2 needs explicit power-on */
	}

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
  1833. /*
  1834. * Initialize IOC dma memory
  1835. *
  1836. * @param[in] dm_kva kernel virtual address of IOC dma memory
  1837. * @param[in] dm_pa physical address of IOC dma memory
  1838. */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	/* Same buffer, viewed as the firmware attribute structure. */
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
/*
 * Request IOC enable; completion is delivered via the enable callback.
 */
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	/* Re-arm the one-shot firmware-trace save for this enable cycle. */
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
/*
 * Request IOC disable; handled asynchronously by the state machine.
 */
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
  1862. /*
  1863. * Initialize memory for saving firmware trace. Driver must initialize
  1864. * trace memory before call bfa_ioc_enable().
  1865. */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave	    = dbg_fwsave;
	/* Trace saving is only enabled when auto-recovery is configured. */
	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
  1872. /*
  1873. * Register mailbox message handler functions
  1874. *
  1875. * @param[in] ioc IOC instance
  1876. * @param[in] mcfuncs message class handler functions
  1877. */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int				mc;

	/* mcfuncs must supply one handler slot per message class. */
	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}
  1886. /*
  1887. * Register mailbox message handler function, to be called by common modules
  1888. */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;

	/* mc is trusted to be a valid class (< BFI_MC_MAX); no bounds check. */
	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg	= cbarg;
}
  1897. /*
  1898. * Queue a mailbox command request to firmware. Waits if mailbox is busy.
  1899. * Responsibility of caller to serialize
  1900. *
  1901. * @param[in] ioc IOC instance
  1902. * @param[i] cmd Mailbox command
  1903. */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	u32			stat;

	/*
	 * If a previous command is pending, queue new command
	 * (preserves FIFO ordering behind it).
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
  1929. /*
  1930. * Handle mailbox interrupts
  1931. */
  1932. void
  1933. bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
  1934. {
  1935. struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
  1936. struct bfi_mbmsg_s m;
  1937. int mc;
  1938. if (bfa_ioc_msgget(ioc, &m)) {
  1939. /*
  1940. * Treat IOC message class as special.
  1941. */
  1942. mc = m.mh.msg_class;
  1943. if (mc == BFI_MC_IOC) {
  1944. bfa_ioc_isr(ioc, &m);
  1945. return;
  1946. }
  1947. if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
  1948. return;
  1949. mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
  1950. }
  1951. bfa_ioc_lpu_read_stat(ioc);
  1952. /*
  1953. * Try to send pending mailbox commands
  1954. */
  1955. bfa_ioc_mbox_poll(ioc);
  1956. }
/*
 * Hardware error interrupt handler: account the failure and drive the
 * IOC state machine with a hardware-error event.
 */
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	ioc->stats.hb_count = ioc->hb_count;	/* snapshot heartbeat count at failure */
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/*
 * Force FC personality on this IOC.
 */
void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode  = BFA_TRUE;
}
  1969. /*
  1970. * return true if IOC is disabled
  1971. */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	/* Disabling counts as disabled for callers gating on this check. */
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
  1978. /*
  1979. * return true if IOC firmware is different.
  1980. */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	/* Mismatch is visible either pre-check (reset/fwcheck) or confirmed. */
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}
/*
 * True when the given fwstate register value is one in which firmware is
 * not actively running (used by bfa_ioc_adapter_is_disabled()).
 */
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
  1995. /*
  1996. * Check if adapter is disabled -- both IOCs should be in a disabled
  1997. * state.
  1998. */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32	ioc_state;

	/* Our own state machine must already be disabled. */
	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	/* Our firmware state register must show a non-running state. */
	ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	/* Single-function 8G1P has no alternate function to check. */
	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
		ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
		if (!bfa_ioc_state_disabled(ioc_state))
			return BFA_FALSE;
	}

	return BFA_TRUE;
}
  2015. /*
  2016. * Reset IOC fwstate registers.
  2017. */
void
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
{
	/* Reset both this function's and the alternate function's state. */
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
}
  2024. #define BFA_MFG_NAME "Brocade"
/*
 * Populate @ad_attr with adapter-level attributes gathered from the
 * firmware attribute block (ioc->attr) and derived IOC properties.
 */
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = ioc->attr->pwwn;
	ad_attr->mac  = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
	/* Trunking needs multiple ports, non-CNA, and a non-mezzanine card. */
	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
				 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
}
/*
 * Classify the IOC as LL (ethernet class code), FC, or FCoE based on
 * the PCI class code and firmware-reported port mode.
 */
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}
  2068. void
  2069. bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
  2070. {
  2071. memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
  2072. memcpy((void *)serial_num,
  2073. (void *)ioc->attr->brcd_serialnum,
  2074. BFA_ADAPTER_SERIAL_NUM_LEN);
  2075. }
  2076. void
  2077. bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
  2078. {
  2079. memset((void *)fw_ver, 0, BFA_VERSION_LEN);
  2080. memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
  2081. }
  2082. void
  2083. bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
  2084. {
  2085. WARN_ON(!chip_rev);
  2086. memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
  2087. chip_rev[0] = 'R';
  2088. chip_rev[1] = 'e';
  2089. chip_rev[2] = 'v';
  2090. chip_rev[3] = '-';
  2091. chip_rev[4] = ioc->attr->asic_rev;
  2092. chip_rev[5] = '\0';
  2093. }
  2094. void
  2095. bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
  2096. {
  2097. memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
  2098. memcpy(optrom_ver, ioc->attr->optrom_version,
  2099. BFA_VERSION_LEN);
  2100. }
  2101. void
  2102. bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
  2103. {
  2104. memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
  2105. memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
  2106. }
/*
 * Build the adapter model string "<mfg>-<card_type>[-<nports>[<F|P>]]"
 * into @model (BFA_ADAPTER_MODEL_NAME_LEN bytes, zeroed first).
 * CT2 ASICs encode the port count (and for Prowler-D an interface-type
 * suffix); older ASICs use just mfg name and card type.
 */
void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	WARN_ON(!model);
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/*
	 * model name
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CT2) {
		int np = bfa_ioc_get_nports(ioc);
		char c;
		switch (ioc_attr->card_type) {
		case BFA_MFG_TYPE_PROWLER_F:
		case BFA_MFG_TYPE_PROWLER_N:
		case BFA_MFG_TYPE_PROWLER_C:
			snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN,
				"%s-%u-%u",
				BFA_MFG_NAME, ioc_attr->card_type, np);
			break;
		case BFA_MFG_TYPE_PROWLER_D:
			/* Suffix: 'F' for FC interface card, 'P' otherwise. */
			if (ioc_attr->ic == BFA_MFG_IC_FC)
				c = 'F';
			else
				c = 'P';

			snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN,
				"%s-%u-%u%c",
				BFA_MFG_NAME, ioc_attr->card_type, np, c);
			break;
		default:
			/* Unknown CT2 card type: leave model zeroed/empty. */
			break;
		}
	} else
		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
			BFA_MFG_NAME, ioc_attr->card_type);
}
/*
 * Return the externally-visible IOC state. For transitional IOC states
 * (enabling/fail/initfail) the finer-grained IOCPF state is consulted
 * and mapped to a more specific reported state.
 */
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			/* Other IOCPF states: keep the IOC-level state. */
			break;
		}
	}

	return ioc_st;
}
/*
 * Fill @ioc_attr with a snapshot of IOC state, port modes, capabilities,
 * adapter attributes and PCI identity.
 */
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;
	ioc_attr->port_mode = ioc->port_mode;
	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
  2189. mac_t
  2190. bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
  2191. {
  2192. /*
  2193. * Check the IOC type and return the appropriate MAC
  2194. */
  2195. if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
  2196. return ioc->attr->fcoe_mac;
  2197. else
  2198. return ioc->attr->mac;
  2199. }
/*
 * Return the manufacturing MAC, adjusted per PCI function so each
 * function on the adapter gets a distinct address.
 */
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t	m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		/* Old scheme: bump only the last MAC byte by the pci func. */
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		/* New scheme: increment across the low 3 bytes with carry. */
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
			bfa_ioc_pcifn(ioc));

	return m;
}
bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	/* Crossbow-family (CB) device ids are always FC mode. */
	return ioc->fcmode || bfa_asic_id_cb(ioc->pcidev.device_id);
}
  2217. /*
  2218. * Retrieve saved firmware trace from a prior IOC failure.
  2219. */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int	tlen;

	/* No trace was saved (or trace saving is not enabled). */
	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	/* Copy at most what was saved; report the actual length back. */
	/* NOTE(review): assumes *trclen >= 0 on entry — verify callers. */
	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;
	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}
  2233. /*
  2234. * Retrieve saved firmware trace from a prior IOC failure.
  2235. */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	/* Trace region in smem for this port. */
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	bfa_trc(ioc, *trclen);

	/* Clamp the read to the fixed firmware-trace region size. */
	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;

	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
}
/*
 * Send a debug-sync mailbox command to firmware (no response expected).
 */
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->clscode = cpu_to_be16(ioc->clscode);
	bfa_ioc_mbox_queue(ioc, &cmd);
}
/*
 * Issue a firmware sync and busy-wait (bounded) for the mailbox to drain.
 */
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;	/* bounded spin; prevents hanging on dead fw */

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending a fw sync mbox command wait for it to
	 * take effect.  We will not wait for a response because
	 *    1. fw_sync mbox cmd doesn't have a response.
	 *    2. Even if we implement that,  interrupts might not
	 *	 be enabled when we call this function.
	 * So, just keep checking if any mbox cmd is pending, and
	 * after waiting for a reasonable amount of time, go ahead.
	 * It is possible that fw has crashed and the mbox command
	 * is never acknowledged.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}
  2279. /*
  2280. * Dump firmware smem
  2281. */
/*
 * Read one chunk of the firmware smem core dump.
 *
 * @param[in]     ioc     IOC instance
 * @param[out]    buf     destination buffer for this chunk
 * @param[in,out] offset  smem offset to start from; advanced by the bytes
 *                        read, and reset to 0 once the end is reached
 * @param[in,out] buflen  requested chunk size in; actual bytes read out
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
				u32 *offset, int *buflen)
{
	u32 loff;
	int dlen;
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * First smem read, sync smem before proceeding
	 * No need to sync before reading every chunk.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	/* Clamp the last chunk to the end of smem. */
	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	*offset += dlen;

	if (*offset >= smem_len)
		*offset = 0;	/* dump complete; restart for next full dump */

	*buflen = dlen;

	return status;
}
  2315. /*
  2316. * Firmware statistics
  2317. */
/*
 * Read firmware statistics for this port from smem into @stats.
 * Returns BFA_STATUS_DEVBUSY if another stats operation is in flight.
 */
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
	/* Per-port statistics region in smem. */
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;	/* simple busy flag; no lock taken here */

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}
/*
 * Zero the firmware statistics region for this port in smem.
 * Returns BFA_STATUS_DEVBUSY if another stats operation is in flight.
 */
bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
	/* Per-port statistics region in smem. */
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;	/* simple busy flag; no lock taken here */

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_clr(ioc, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}
  2352. /*
  2353. * Save firmware trace if configured.
  2354. */
static void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
{
	int		tlen;

	/* One-shot: only the first failure after (re)enable saves a trace. */
	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}
  2367. /*
  2368. * Firmware failure detected. Start recovery actions.
  2369. */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	ioc->stats.hb_count = ioc->hb_count;	/* snapshot heartbeat count at failure */
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
/*
 * Validate attribute WWNs for non-LL IOCs.
 * NOTE(review): currently a no-op — the LL early-return is followed by
 * no further checks; presumably validation code was removed or is TBD.
 */
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
  2383. /*
  2384. * BFA IOC PF private functions
  2385. */
/*
 * IOCPF timer expiry callback: forward a TIMEOUT event to the PF FSM.
 */
static void
bfa_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
/*
 * Semaphore-poll timer callback: retry acquiring the hardware semaphore.
 */
static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
/*
 * Poll the firmware state register during initialization. Signals
 * FWREADY when firmware reaches DISABLED; otherwise re-arms the poll
 * timer until the accumulated poll time exceeds BFA_IOC_TOV, at which
 * point a timeout is raised.
 */
static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
{
	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	bfa_trc(ioc, fwstate);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
		bfa_iocpf_timeout(ioc);
	else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		bfa_iocpf_poll_timer_start(ioc);
	}
}
/*
 * Poll timer callback: continue polling the firmware init state.
 */
static void
bfa_iocpf_poll_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_poll_fwinit(ioc);
}
  2421. /*
  2422. * bfa timer function
  2423. */
  2424. void
  2425. bfa_timer_beat(struct bfa_timer_mod_s *mod)
  2426. {
  2427. struct list_head *qh = &mod->timer_q;
  2428. struct list_head *qe, *qe_next;
  2429. struct bfa_timer_s *elem;
  2430. struct list_head timedout_q;
  2431. INIT_LIST_HEAD(&timedout_q);
  2432. qe = bfa_q_next(qh);
  2433. while (qe != qh) {
  2434. qe_next = bfa_q_next(qe);
  2435. elem = (struct bfa_timer_s *) qe;
  2436. if (elem->timeout <= BFA_TIMER_FREQ) {
  2437. elem->timeout = 0;
  2438. list_del(&elem->qe);
  2439. list_add_tail(&elem->qe, &timedout_q);
  2440. } else {
  2441. elem->timeout -= BFA_TIMER_FREQ;
  2442. }
  2443. qe = qe_next; /* go to next elem */
  2444. }
  2445. /*
  2446. * Pop all the timeout entries
  2447. */
  2448. while (!list_empty(&timedout_q)) {
  2449. bfa_q_deq(&timedout_q, &elem);
  2450. elem->timercb(elem->arg);
  2451. }
  2452. }
  2453. /*
  2454. * Should be called with lock protection
  2455. */
/*
 * Arm a timer: after ~@timeout (decremented by BFA_TIMER_FREQ per tick),
 * @timercb(@arg) is invoked from bfa_timer_beat().
 * Caller must hold the module lock; the timer must not already be queued.
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		    void (*timercb) (void *), void *arg, unsigned int timeout)
{
	WARN_ON(timercb == NULL);
	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timeout = timeout;
	timer->timercb = timercb;
	timer->arg = arg;

	list_add_tail(&timer->qe, &mod->timer_q);
}
  2467. /*
  2468. * Should be called with lock protection
  2469. */
/*
 * Cancel an armed timer. Caller must hold the module lock; the timer
 * must currently be on a queue.
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	WARN_ON(list_empty(&timer->qe));

	list_del(&timer->qe);
}
  2476. /*
  2477. * ASIC block related
  2478. */
  2479. static void
  2480. bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
  2481. {
  2482. struct bfa_ablk_cfg_inst_s *cfg_inst;
  2483. int i, j;
  2484. u16 be16;
  2485. u32 be32;
  2486. for (i = 0; i < BFA_ABLK_MAX; i++) {
  2487. cfg_inst = &cfg->inst[i];
  2488. for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
  2489. be16 = cfg_inst->pf_cfg[j].pers;
  2490. cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
  2491. be16 = cfg_inst->pf_cfg[j].num_qpairs;
  2492. cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
  2493. be16 = cfg_inst->pf_cfg[j].num_vectors;
  2494. cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
  2495. be32 = cfg_inst->pf_cfg[j].bw;
  2496. cfg_inst->pf_cfg[j].bw = be16_to_cpu(be32);
  2497. }
  2498. }
  2499. }
/*
 * ASIC-block mailbox response handler: complete the outstanding request,
 * copy out query results, and invoke the saved completion callback.
 */
static void
bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
{
	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
	bfa_ablk_cbfn_t cbfn;

	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
	bfa_trc(ablk->ioc, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_ABLK_I2H_QUERY:
		if (rsp->status == BFA_STATUS_OK) {
			/* Copy config out of the DMA buffer and fix endianness. */
			memcpy(ablk->cfg, ablk->dma_addr.kva,
				sizeof(struct bfa_ablk_cfg_s));
			bfa_ablk_config_swap(ablk->cfg);
			ablk->cfg = NULL;
		}
		break;

	case BFI_ABLK_I2H_ADPT_CONFIG:
	case BFI_ABLK_I2H_PORT_CONFIG:
		/* update config port mode */
		ablk->ioc->port_mode_cfg = rsp->port_mode;
		/* fallthrough — these replies also need no further action */

	case BFI_ABLK_I2H_PF_DELETE:
	case BFI_ABLK_I2H_PF_UPDATE:
	case BFI_ABLK_I2H_OPTROM_ENABLE:
	case BFI_ABLK_I2H_OPTROM_DISABLE:
		/* No-op */
		break;

	case BFI_ABLK_I2H_PF_CREATE:
		/* Return the firmware-assigned pcifn to the requester. */
		*(ablk->pcifn) = rsp->pcifn;
		ablk->pcifn = NULL;
		break;

	default:
		WARN_ON(1);
	}

	/* Request complete; clear busy and invoke the saved callback once. */
	ablk->busy = BFA_FALSE;
	if (ablk->cbfn) {
		cbfn = ablk->cbfn;
		ablk->cbfn = NULL;
		cbfn(ablk->cbarg, rsp->status);
	}
}
  2541. static void
  2542. bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
  2543. {
  2544. struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
  2545. bfa_trc(ablk->ioc, event);
  2546. switch (event) {
  2547. case BFA_IOC_E_ENABLED:
  2548. WARN_ON(ablk->busy != BFA_FALSE);
  2549. break;
  2550. case BFA_IOC_E_DISABLED:
  2551. case BFA_IOC_E_FAILED:
  2552. /* Fail any pending requests */
  2553. ablk->pcifn = NULL;
  2554. if (ablk->busy) {
  2555. if (ablk->cbfn)
  2556. ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
  2557. ablk->cbfn = NULL;
  2558. ablk->busy = BFA_FALSE;
  2559. }
  2560. break;
  2561. default:
  2562. WARN_ON(1);
  2563. break;
  2564. }
  2565. }
  2566. u32
  2567. bfa_ablk_meminfo(void)
  2568. {
  2569. return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
  2570. }
  2571. void
  2572. bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
  2573. {
  2574. ablk->dma_addr.kva = dma_kva;
  2575. ablk->dma_addr.pa = dma_pa;
  2576. }
  2577. void
  2578. bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
  2579. {
  2580. ablk->ioc = ioc;
  2581. bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
  2582. bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
  2583. list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
  2584. }
  2585. bfa_status_t
  2586. bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
  2587. bfa_ablk_cbfn_t cbfn, void *cbarg)
  2588. {
  2589. struct bfi_ablk_h2i_query_s *m;
  2590. WARN_ON(!ablk_cfg);
  2591. if (!bfa_ioc_is_operational(ablk->ioc)) {
  2592. bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
  2593. return BFA_STATUS_IOC_FAILURE;
  2594. }
  2595. if (ablk->busy) {
  2596. bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
  2597. return BFA_STATUS_DEVBUSY;
  2598. }
  2599. ablk->cfg = ablk_cfg;
  2600. ablk->cbfn = cbfn;
  2601. ablk->cbarg = cbarg;
  2602. ablk->busy = BFA_TRUE;
  2603. m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
  2604. bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
  2605. bfa_ioc_portid(ablk->ioc));
  2606. bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
  2607. bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
  2608. return BFA_STATUS_OK;
  2609. }
  2610. bfa_status_t
  2611. bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
  2612. u8 port, enum bfi_pcifn_class personality, int bw,
  2613. bfa_ablk_cbfn_t cbfn, void *cbarg)
  2614. {
  2615. struct bfi_ablk_h2i_pf_req_s *m;
  2616. if (!bfa_ioc_is_operational(ablk->ioc)) {
  2617. bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
  2618. return BFA_STATUS_IOC_FAILURE;
  2619. }
  2620. if (ablk->busy) {
  2621. bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
  2622. return BFA_STATUS_DEVBUSY;
  2623. }
  2624. ablk->pcifn = pcifn;
  2625. ablk->cbfn = cbfn;
  2626. ablk->cbarg = cbarg;
  2627. ablk->busy = BFA_TRUE;
  2628. m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
  2629. bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
  2630. bfa_ioc_portid(ablk->ioc));
  2631. m->pers = cpu_to_be16((u16)personality);
  2632. m->bw = cpu_to_be32(bw);
  2633. m->port = port;
  2634. bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
  2635. return BFA_STATUS_OK;
  2636. }
  2637. bfa_status_t
  2638. bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
  2639. bfa_ablk_cbfn_t cbfn, void *cbarg)
  2640. {
  2641. struct bfi_ablk_h2i_pf_req_s *m;
  2642. if (!bfa_ioc_is_operational(ablk->ioc)) {
  2643. bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
  2644. return BFA_STATUS_IOC_FAILURE;
  2645. }
  2646. if (ablk->busy) {
  2647. bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
  2648. return BFA_STATUS_DEVBUSY;
  2649. }
  2650. ablk->cbfn = cbfn;
  2651. ablk->cbarg = cbarg;
  2652. ablk->busy = BFA_TRUE;
  2653. m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
  2654. bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
  2655. bfa_ioc_portid(ablk->ioc));
  2656. m->pcifn = (u8)pcifn;
  2657. bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
  2658. return BFA_STATUS_OK;
  2659. }
  2660. bfa_status_t
  2661. bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
  2662. int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
  2663. {
  2664. struct bfi_ablk_h2i_cfg_req_s *m;
  2665. if (!bfa_ioc_is_operational(ablk->ioc)) {
  2666. bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
  2667. return BFA_STATUS_IOC_FAILURE;
  2668. }
  2669. if (ablk->busy) {
  2670. bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
  2671. return BFA_STATUS_DEVBUSY;
  2672. }
  2673. ablk->cbfn = cbfn;
  2674. ablk->cbarg = cbarg;
  2675. ablk->busy = BFA_TRUE;
  2676. m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
  2677. bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
  2678. bfa_ioc_portid(ablk->ioc));
  2679. m->mode = (u8)mode;
  2680. m->max_pf = (u8)max_pf;
  2681. m->max_vf = (u8)max_vf;
  2682. bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
  2683. return BFA_STATUS_OK;
  2684. }
  2685. bfa_status_t
  2686. bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
  2687. int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
  2688. {
  2689. struct bfi_ablk_h2i_cfg_req_s *m;
  2690. if (!bfa_ioc_is_operational(ablk->ioc)) {
  2691. bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
  2692. return BFA_STATUS_IOC_FAILURE;
  2693. }
  2694. if (ablk->busy) {
  2695. bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
  2696. return BFA_STATUS_DEVBUSY;
  2697. }
  2698. ablk->cbfn = cbfn;
  2699. ablk->cbarg = cbarg;
  2700. ablk->busy = BFA_TRUE;
  2701. m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
  2702. bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
  2703. bfa_ioc_portid(ablk->ioc));
  2704. m->port = (u8)port;
  2705. m->mode = (u8)mode;
  2706. m->max_pf = (u8)max_pf;
  2707. m->max_vf = (u8)max_vf;
  2708. bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
  2709. return BFA_STATUS_OK;
  2710. }
  2711. bfa_status_t
  2712. bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
  2713. bfa_ablk_cbfn_t cbfn, void *cbarg)
  2714. {
  2715. struct bfi_ablk_h2i_pf_req_s *m;
  2716. if (!bfa_ioc_is_operational(ablk->ioc)) {
  2717. bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
  2718. return BFA_STATUS_IOC_FAILURE;
  2719. }
  2720. if (ablk->busy) {
  2721. bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
  2722. return BFA_STATUS_DEVBUSY;
  2723. }
  2724. ablk->cbfn = cbfn;
  2725. ablk->cbarg = cbarg;
  2726. ablk->busy = BFA_TRUE;
  2727. m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
  2728. bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
  2729. bfa_ioc_portid(ablk->ioc));
  2730. m->pcifn = (u8)pcifn;
  2731. m->bw = cpu_to_be32(bw);
  2732. bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
  2733. return BFA_STATUS_OK;
  2734. }
  2735. bfa_status_t
  2736. bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
  2737. {
  2738. struct bfi_ablk_h2i_optrom_s *m;
  2739. if (!bfa_ioc_is_operational(ablk->ioc)) {
  2740. bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
  2741. return BFA_STATUS_IOC_FAILURE;
  2742. }
  2743. if (ablk->busy) {
  2744. bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
  2745. return BFA_STATUS_DEVBUSY;
  2746. }
  2747. ablk->cbfn = cbfn;
  2748. ablk->cbarg = cbarg;
  2749. ablk->busy = BFA_TRUE;
  2750. m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
  2751. bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
  2752. bfa_ioc_portid(ablk->ioc));
  2753. bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
  2754. return BFA_STATUS_OK;
  2755. }
  2756. bfa_status_t
  2757. bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
  2758. {
  2759. struct bfi_ablk_h2i_optrom_s *m;
  2760. if (!bfa_ioc_is_operational(ablk->ioc)) {
  2761. bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
  2762. return BFA_STATUS_IOC_FAILURE;
  2763. }
  2764. if (ablk->busy) {
  2765. bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
  2766. return BFA_STATUS_DEVBUSY;
  2767. }
  2768. ablk->cbfn = cbfn;
  2769. ablk->cbarg = cbarg;
  2770. ablk->busy = BFA_TRUE;
  2771. m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
  2772. bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
  2773. bfa_ioc_portid(ablk->ioc));
  2774. bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
  2775. return BFA_STATUS_OK;
  2776. }