bfa_ioc.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447
  1. /*
  2. * Linux network driver for Brocade Converged Network Adapter.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU General Public License (GPL) Version 2 as
  6. * published by the Free Software Foundation
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. /*
  14. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  15. * All rights reserved
  16. * www.brocade.com
  17. */
  18. #include "bfa_ioc.h"
  19. #include "cna.h"
  20. #include "bfi.h"
  21. #include "bfi_reg.h"
  22. #include "bfa_defs.h"
/**
 * IOC local definitions
 */

/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 *
 * Each macro dispatches through the per-ASIC hardware-interface vtable
 * (ioc_hwif) attached to the IOC, so the generic state machines below
 * stay chip-independent.
 */
#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

/* True if a mailbox command is queued by the driver or still pending in
 * the h/w mailbox command register. */
#define bfa_ioc_mbox_cmd_pending(__ioc)			\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
/* Default policy: automatically retry IOC bring-up after failures. */
static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
			u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
						char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
						char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
						char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
						char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
						char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
/**
 * IOC state machine definitions/declarations
 */

/* Events consumed by the bfa_ioc_sm_* handlers below. */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
};
/* Declare entry-action and event-handler functions for each IOC state. */
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

/* Maps each IOC state handler to the externally visible bfa_ioc state. */
static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
/**
 * IOCPF state machine definitions/declarations
 */

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);
/**
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
};
/**
 * IOCPF states
 *
 * Externally visible states; several internal sm states (e.g. the *_sync
 * variants below) map onto a single value here.
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized	*/
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF failed		*/
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed		*/
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from drivers */
};
/* Declare entry-action and event-handler functions for each IOCPF state. */
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

/* Maps each IOCPF state handler to the externally visible IOCPF state. */
static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/**
 * IOC State Machine
 */

/**
 * Beginning state. IOC uninit state.
 *
 * No entry action; the IOC waits here for an IOC_E_RESET event.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}
  217. /**
  218. * IOC is in uninit state.
  219. */
  220. static void
  221. bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
  222. {
  223. switch (event) {
  224. case IOC_E_RESET:
  225. bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
  226. break;
  227. default:
  228. bfa_sm_fault(event);
  229. }
  230. }
/**
 * Reset entry actions -- initialize state machine
 *
 * Also drives the IOCPF sub-state machine back to its reset state.
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
  239. /**
  240. * IOC is in reset state.
  241. */
  242. static void
  243. bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
  244. {
  245. switch (event) {
  246. case IOC_E_ENABLE:
  247. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  248. break;
  249. case IOC_E_DISABLE:
  250. bfa_ioc_disable_comp(ioc);
  251. break;
  252. case IOC_E_DETACH:
  253. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  254. break;
  255. default:
  256. bfa_sm_fault(event);
  257. }
  258. }
/* Entry action: forward the enable request to the IOCPF sub-state machine. */
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_enable(ioc);
}
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		/* Fail the pending enable callback and mark the IOC failed. */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		/* On PFFAILED the iocpf sm already knows about its failure. */
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		/* Unrecoverable h/w error (e.g. PCI mapping failure). */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		/* Enable already in progress: ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * Entry action: send the getattr request to firmware and arm the IOC
 * response timer (BFA_IOC_TOV ms).
 */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}
/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_ioc_check_attr_wwns(ioc);
		/* Start heartbeat monitoring before going operational. */
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		del_timer(&ioc->ioc_timer);
		/* fall through */
	case IOC_E_TIMEOUT:
		/* Getattr failed or timed out: fail the pending enable. */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		/* On PFFAILED the iocpf sm already knows of the failure. */
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		/* Enable already in progress: ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry action: complete the enable callback and notify listeners. */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
}
/* IOC is operational; heartbeat monitoring was started on state entry. */
static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		/* Already enabled: ignore. */
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		/* With auto-recovery, retry h/w init; else stay failed. */
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		bfa_ioc_fail_notify(ioc);
		/* On PFFAILED the iocpf sm already knows of the failure. */
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry action: forward the disable request to the IOCPF sub-state machine. */
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_disable(ioc);
}
/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_iocpf_fail(ioc);
		break;

	case IOC_E_HWFAILED:
		/* Unrecoverable h/w error: complete the disable directly. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}
  415. static void
  416. bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
  417. {
  418. switch (event) {
  419. case IOC_E_ENABLE:
  420. bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
  421. break;
  422. case IOC_E_DISABLE:
  423. ioc->cbfn->disable_cbfn(ioc->bfa);
  424. break;
  425. case IOC_E_DETACH:
  426. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  427. bfa_iocpf_stop(ioc);
  428. break;
  429. default:
  430. bfa_sm_fault(event);
  431. }
  432. }
/* No entry action: the retry is driven by subsequent IOCPF events. */
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}
/**
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		/* Retry succeeded: continue with attribute fetch. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/**
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		/* On PFFAILED the iocpf sm already knows of the failure. */
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		/* Already attempting to come up: ignore. */
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* No entry action; failure notification happened before the transition. */
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}
/**
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		/* IOC is failed: an enable request cannot succeed. */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_HWERROR:
		/* HB failure notification, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* No entry action for the unrecoverable hardware-failure state. */
static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}
  506. /**
  507. * IOC failure.
  508. */
  509. static void
  510. bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
  511. {
  512. switch (event) {
  513. case IOC_E_ENABLE:
  514. ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
  515. break;
  516. case IOC_E_DISABLE:
  517. ioc->cbfn->disable_cbfn(ioc->bfa);
  518. break;
  519. case IOC_E_DETACH:
  520. bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
  521. break;
  522. default:
  523. bfa_sm_fault(event);
  524. }
  525. }
/**
 * IOCPF State Machine
 */

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	/* Allow the fw-mismatch notification to fire again next time. */
	iocpf->fw_mismatch_notified = false;
	/* Latch the module-wide auto-recovery policy for this instance. */
	iocpf->auto_recover = bfa_nw_auto_recover;
}
  538. /**
  539. * Beginning state. IOC is in reset state.
  540. */
  541. static void
  542. bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
  543. {
  544. switch (event) {
  545. case IOCPF_E_ENABLE:
  546. bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
  547. break;
  548. case IOCPF_E_STOP:
  549. break;
  550. default:
  551. bfa_sm_fault(event);
  552. }
  553. }
/**
 * Semaphore should be acquired for version check.
 *
 * Entry action: initialize the h/w semaphore and start acquiring it;
 * IOCPF_E_SEMLOCKED is delivered once it is held.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				/* Sync started: join and proceed to h/w init. */
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				/* Cannot start sync yet: back off and retry
				 * after BFA_IOC_HWSEM_TOV ms. */
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			/* Running firmware does not match this driver. */
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
  604. /**
  605. * Notify enable completion callback
  606. */
  607. static void
  608. bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
  609. {
  610. /* Call only the first time sm enters fwmismatch state. */
  611. if (iocpf->fw_mismatch_notified == false)
  612. bfa_ioc_pf_fwmismatch(iocpf->ioc);
  613. iocpf->fw_mismatch_notified = true;
  614. mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
  615. msecs_to_jiffies(BFA_IOC_TOV));
  616. }
/**
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_TIMEOUT:
		/* Periodic re-check of the running firmware version. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * Request for semaphore.
 *
 * Entry action: start acquiring the h/w semaphore; IOCPF_E_SEMLOCKED
 * is delivered once it is held.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			/* Sync not complete: release the semaphore and
			 * retry after BFA_IOC_HWSEM_TOV ms. */
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry: reset poll counter and kick off IOC hardware initialization. */
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_reset(iocpf->ioc, 0);	/* 0 => not a forced reset */
}
/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWREADY:
		/* Firmware came up; proceed to send IOC enable. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		/* Leave the sync group before dropping the semaphore. */
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry: arm reply timeout and send the IOC ENABLE mailbox command. */
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	/**
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		/* Only a real timeout reports failure; INITFAIL does not. */
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry: report enable completion up to the IOC state machine. */
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}
/* IOCPF operational state: handle disable and failure notifications. */
static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry: arm reply timeout and send the IOC DISABLE mailbox command. */
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);
}
/**
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		/* No reply from fw: force the fwstate to FAIL and move on. */
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		/* Stale enable reply arriving after disable was sent: ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry: acquire the h/w semaphore before leaving the sync group. */
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_FAIL:
		/* Already disabling; a failure here changes nothing. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/**
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	/* Drop any queued mailbox commands, then report disable complete. */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}
/* IOCPF disabled state: wait for re-enable or final stop. */
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		/* Release the firmware image before going back to reset. */
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry: acquire the h/w semaphore to serialize failure handling. */
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		/* Under the semaphore: flag failure, leave sync, mark fw. */
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		/* Already handling an init failure: ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry: nothing to do; initfail is a passive wait state. */
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}
/**
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry: stop firmware, flush mailbox, then grab the h/w semaphore. */
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
/**
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			/* No auto-recovery: park in the failed state. */
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			/* Auto-recovery: re-init now if all functions have
			 * acked, otherwise release and wait in semwait. */
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		/* Already failed: ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entry: nothing to do; fail is a passive wait state. */
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}
/**
 * @brief
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		/* Only a disable request moves us out of the failed state. */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}
  999. /**
  1000. * BFA IOC private functions
  1001. */
/**
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;
	struct list_head *qe;

	list_for_each(qe, &ioc->notify_q) {
		/* NOTE(review): cast assumes the list link is the first
		 * member of struct bfa_ioc_notify -- confirm in its defn. */
		notify = (struct bfa_ioc_notify *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}
/* Disable completion: invoke driver callback and broadcast DISABLED. */
static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}
  1021. bool
  1022. bfa_nw_ioc_sem_get(void __iomem *sem_reg)
  1023. {
  1024. u32 r32;
  1025. int cnt = 0;
  1026. #define BFA_SEM_SPINCNT 3000
  1027. r32 = readl(sem_reg);
  1028. while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
  1029. cnt++;
  1030. udelay(2);
  1031. r32 = readl(sem_reg);
  1032. }
  1033. if (!(r32 & 1))
  1034. return true;
  1035. return false;
  1036. }
/**
 * Release a h/w semaphore register by writing 1 to it.
 * The preceding read appears to order/flush the access -- NOTE(review):
 * confirm the exact hardware requirement for the read-before-write.
 */
void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	readl(sem_reg);
	writel(1, sem_reg);
}
  1043. static void
  1044. bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
  1045. {
  1046. struct bfi_ioc_image_hdr fwhdr;
  1047. u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
  1048. if (fwstate == BFI_IOC_UNINIT)
  1049. return;
  1050. bfa_nw_ioc_fwver_get(ioc, &fwhdr);
  1051. if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
  1052. return;
  1053. writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
  1054. /*
  1055. * Try to lock and then unlock the semaphore.
  1056. */
  1057. readl(ioc->ioc_regs.ioc_sem_reg);
  1058. writel(1, ioc->ioc_regs.ioc_sem_reg);
  1059. }
/**
 * Attempt to take the IOC h/w semaphore; deliver the result as an
 * IOCPF event, or re-arm the sem_timer to poll again.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		/* All-ones read: the device/register is inaccessible. */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	/* Semaphore busy: poll again after BFA_IOC_HWSEM_TOV ms. */
	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}
/* Release the IOC h/w semaphore (write 1 releases). */
void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
/* Cancel a pending semaphore-poll timer started by bfa_ioc_hw_sem_get(). */
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}
/**
 * @brief
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	/* Clear the init-enable/done bits once initialization completed. */
	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/* Release LPU0 from reset so the downloaded firmware starts running. */
static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/* Hold both LPU processors in reset, stopping the firmware. */
static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/**
 * Get driver and firmware versions.
 *
 * Reads the running firmware's image header out of shared memory
 * (page 0, offset 0) word by word into @fwhdr, byte-swapping each word.
 */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
}
  1165. /**
  1166. * Returns TRUE if same.
  1167. */
  1168. bool
  1169. bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
  1170. {
  1171. struct bfi_ioc_image_hdr *drv_fwhdr;
  1172. int i;
  1173. drv_fwhdr = (struct bfi_ioc_image_hdr *)
  1174. bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
  1175. for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
  1176. if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
  1177. return false;
  1178. }
  1179. return true;
  1180. }
/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature)
		return false;

	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	/* Signature and env match; finally compare the MD5 checksums. */
	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}
/**
 * Conditionally flush any pending message from firmware at start.
 * Writing 1 to the LPU mailbox cmd register acks/clears a pending msg.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
/**
 * @img ioc_init_logic.jpg
 *
 * Decide how to bring the IOC up based on the current firmware state:
 * boot fresh firmware, wait for an in-progress init by the other
 * function, or simply re-attach to already-running firmware.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		/* No usable firmware: download and boot our image. */
		bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env);
	bfa_ioc_poll_fwinit(ioc);
}
/* IOC timer callback: forward a timeout event to the IOC state machine. */
void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
/**
 * Copy a message (at most BFI_IOC_MSGLEN_MAX bytes) into the host->fw
 * mailbox registers, zero-padding the remainder, and ring the doorbell.
 */
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 *
	 * NOTE(review): writel() itself converts to little-endian on most
	 * architectures; combined with cpu_to_le32() this is a no-op on LE
	 * hosts -- verify intent for big-endian builds.
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);	/* flush the doorbell */
}
/* Build and send the BFI IOC ENABLE request to firmware. */
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	do_gettimeofday(&tv);
	/* ntohl == htonl on all platforms; stamps current time big-endian. */
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}
/* Build and send the BFI IOC DISABLE request to firmware. */
static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}
/* Request IOC attributes; firmware DMAs them to ioc->attr_dma. */
static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
  1314. void
  1315. bfa_nw_ioc_hb_check(void *cbarg)
  1316. {
  1317. struct bfa_ioc *ioc = cbarg;
  1318. u32 hb_count;
  1319. hb_count = readl(ioc->ioc_regs.heartbeat);
  1320. if (ioc->hb_count == hb_count) {
  1321. bfa_ioc_recover(ioc);
  1322. return;
  1323. } else {
  1324. ioc->hb_count = hb_count;
  1325. }
  1326. bfa_ioc_mbox_poll(ioc);
  1327. mod_timer(&ioc->hb_timer, jiffies +
  1328. msecs_to_jiffies(BFA_IOC_HB_TOV));
  1329. }
/* Start heartbeat monitoring: seed the count and arm the hb timer. */
static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}
/* Stop heartbeat monitoring. */
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}
/**
 * @brief
 * Initiate a full firmware download.
 *
 * Copies the driver's firmware image chunk by chunk into IOC shared
 * memory, handling SMEM page wrap-around, then writes boot type/env
 * and the ASIC device mode at their fixed SMEM offsets.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
		/* Fetch the next image chunk when crossing a chunk border. */
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
			      ((ioc->ioc_regs.smem_page_start) + (loff)));

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
				      ioc->ioc_regs.host_page_num_fn);
		}
	}

	/* Restore the page register to page 0 for the trailer writes. */
	writel(bfa_ioc_smem_pgnum(ioc, 0),
		      ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
					ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
			+ BFI_FWBOOT_DEVMODE_OFF));
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_ENV_OFF)));
}
/* Thin wrapper around bfa_ioc_hwinit(); @force skips fw-valid checks. */
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}
/**
 * BFA ioc enable reply by firmware
 *
 * Records the port mode and adapter capability reported by firmware,
 * then advances the IOCPF state machine.
 */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
			u8 cap_bm)
{
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	ioc->port_mode = ioc->port_mode_cfg = port_mode;
	ioc->ad_cap_bm = cap_bm;
	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}
/**
 * @brief
 * Update BFA configuration from firmware configuration.
 *
 * Converts the wire-format (big-endian) attribute fields in place,
 * then advances the IOC state machine.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/**
 * Attach time initialization of mbox logic.
 *
 * Empties the command queue and clears all per-message-class handlers;
 * cbarg defaults to the driver instance.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	bfa_mbox_cmd_cbfn_t cbfn;
	void *cbarg;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	/**
	 * Give a callback to the client, indicating that the command is sent
	 */
	if (cmd->cbfn) {
		/* Clear cbfn before invoking so it fires exactly once. */
		cbfn = cmd->cbfn;
		cbarg = cmd->cbarg;
		cmd->cbfn = NULL;
		cbfn(cbarg);
	}
}
/**
 * Cleanup any pending requests.
 * Dequeued commands are simply dropped; their callbacks are not invoked.
 */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
/* Report IOC failure to the driver callback and registered modules. */
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
}
/* IOCPF -> IOC upcall: PF enable completed. */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}
/* IOCPF -> IOC upcall: PF disable completed. */
static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}
/* IOCPF -> IOC upcall: PF (initialization/enable) failed. */
static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}
/* IOCPF -> IOC upcall: unrecoverable hardware failure. */
static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}
/* IOCPF -> driver upcall: firmware version mismatch detected. */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
/**
 * IOC public
 *
 * Initialize the ASIC PLL under the chip-wide init semaphore.
 * Always returns BFA_STATUS_OK; the return of bfa_nw_ioc_sem_get() is
 * ignored here -- NOTE(review): a semaphore timeout is not handled.
 */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;

	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static void
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
		u32 boot_env)
{
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
	} else {
		writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
	}

	/* Clear stale fw messages, load the image, start the processor. */
	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);
	bfa_ioc_lpu_start(ioc);
}
/**
 * Enable/disable IOC failure auto recovery.
 * Sets a module-wide flag; affects subsequently initialized IOCs.
 */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	bfa_nw_auto_recover = auto_recover;
}
/**
 * Read one firmware->host mailbox message into @mbmsg.
 * Returns false if no message is pending, true after a message has
 * been copied out and the mailbox interrupt acknowledged.
 */
static bool
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return false;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			      i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);	/* flush the ack write */

	return true;
}
/* Dispatch a firmware->host IOC-class message to its handler. */
static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u *msg;
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		/* Heartbeat is consumed by the hb timer, nothing to do. */
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_ioc_enable_reply(ioc,
			(enum bfa_mode)msg->fw_event.port_mode,
			msg->fw_event.cap_bm);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		BUG_ON(1);
	}
}
/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 * @param[in]	cbfn	driver callback table for IOC events
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->fcmode	= false;
	ioc->pllinit	= false;
	ioc->dbg_fwsave_once = true;
	ioc->iocpf.ioc	= ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	/* Start the state machine in uninit and immediately reset it. */
	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/**
 * Driver detach time IOC cleanup.
 */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);

	/* Done with detach, empty the notify_q. */
	INIT_LIST_HEAD(&ioc->notify_q);
}
/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 * @param[in]	clscode	PCI function class code
 */
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		 enum bfi_pcifn_class clscode)
{
	ioc->clscode	= clscode;
	ioc->pcidev	= *pcidev;

	/**
	 * Initialize IOC and device personality
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode  = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case PCI_DEVICE_ID_BROCADE_CT:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;
		break;

	default:
		/* Only the CT ASIC is supported by this driver. */
		BUG_ON(1);
	}

	/* Install the CT hardware interface and map port registers. */
	bfa_nw_ioc_set_ct_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}
/**
 * Return size of dma memory required.
 */
u32
bfa_nw_ioc_meminfo(void)
{
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}
/* Public entry: request IOC enable via the state machine. */
void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
/**
 * Disable the IOC: account the disable in the statistics and kick
 * the IOC state machine with IOC_E_DISABLE.
 */
void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
  1727. static u32
  1728. bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
  1729. {
  1730. return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
  1731. }
  1732. /**
  1733. * Register mailbox message handler function, to be called by common modules
  1734. */
  1735. void
  1736. bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
  1737. bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
  1738. {
  1739. struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
  1740. mod->mbhdlr[mc].cbfn = cbfn;
  1741. mod->mbhdlr[mc].cbarg = cbarg;
  1742. }
  1743. /**
  1744. * Queue a mailbox command request to firmware. Waits if mailbox is busy.
  1745. * Responsibility of caller to serialize
  1746. *
  1747. * @param[in] ioc IOC instance
  1748. * @param[i] cmd Mailbox command
  1749. */
  1750. bool
  1751. bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
  1752. bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
  1753. {
  1754. struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
  1755. u32 stat;
  1756. cmd->cbfn = cbfn;
  1757. cmd->cbarg = cbarg;
  1758. /**
  1759. * If a previous command is pending, queue new command
  1760. */
  1761. if (!list_empty(&mod->cmd_q)) {
  1762. list_add_tail(&cmd->qe, &mod->cmd_q);
  1763. return true;
  1764. }
  1765. /**
  1766. * If mailbox is busy, queue command for poll timer
  1767. */
  1768. stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
  1769. if (stat) {
  1770. list_add_tail(&cmd->qe, &mod->cmd_q);
  1771. return true;
  1772. }
  1773. /**
  1774. * mailbox is free -- queue command to firmware
  1775. */
  1776. bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
  1777. return false;
  1778. }
  1779. /**
  1780. * Handle mailbox interrupts
  1781. */
  1782. void
  1783. bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
  1784. {
  1785. struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
  1786. struct bfi_mbmsg m;
  1787. int mc;
  1788. if (bfa_ioc_msgget(ioc, &m)) {
  1789. /**
  1790. * Treat IOC message class as special.
  1791. */
  1792. mc = m.mh.msg_class;
  1793. if (mc == BFI_MC_IOC) {
  1794. bfa_ioc_isr(ioc, &m);
  1795. return;
  1796. }
  1797. if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
  1798. return;
  1799. mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
  1800. }
  1801. bfa_ioc_lpu_read_stat(ioc);
  1802. /**
  1803. * Try to send pending mailbox commands
  1804. */
  1805. bfa_ioc_mbox_poll(ioc);
  1806. }
/**
 * IOC hardware error interrupt handler: account the failure in the
 * heartbeat-failure statistics and forward IOC_E_HWERROR to the IOC
 * state machine.
 */
void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
  1814. /**
  1815. * return true if IOC is disabled
  1816. */
  1817. bool
  1818. bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
  1819. {
  1820. return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
  1821. bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
  1822. }
/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 *
 * NOTE(review): entries are never individually removed here; the whole
 * notify_q appears to be reset wholesale on detach -- confirm callers
 * do not expect an unregister path.
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
			struct bfa_ioc_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->notify_q);
}
  1833. #define BFA_MFG_NAME "Brocade"
  1834. static void
  1835. bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
  1836. struct bfa_adapter_attr *ad_attr)
  1837. {
  1838. struct bfi_ioc_attr *ioc_attr;
  1839. ioc_attr = ioc->attr;
  1840. bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
  1841. bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
  1842. bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
  1843. bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
  1844. memcpy(&ad_attr->vpd, &ioc_attr->vpd,
  1845. sizeof(struct bfa_mfg_vpd));
  1846. ad_attr->nports = bfa_ioc_get_nports(ioc);
  1847. ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
  1848. bfa_ioc_get_adapter_model(ioc, ad_attr->model);
  1849. /* For now, model descr uses same model string */
  1850. bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
  1851. ad_attr->card_type = ioc_attr->card_type;
  1852. ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
  1853. if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
  1854. ad_attr->prototype = 1;
  1855. else
  1856. ad_attr->prototype = 0;
  1857. ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
  1858. ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
  1859. ad_attr->pcie_gen = ioc_attr->pcie_gen;
  1860. ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
  1861. ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
  1862. ad_attr->asic_rev = ioc_attr->asic_rev;
  1863. bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
  1864. }
  1865. static enum bfa_ioc_type
  1866. bfa_ioc_get_type(struct bfa_ioc *ioc)
  1867. {
  1868. if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
  1869. return BFA_IOC_TYPE_LL;
  1870. BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
  1871. return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
  1872. ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
  1873. }
  1874. static void
  1875. bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
  1876. {
  1877. memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
  1878. memcpy(serial_num,
  1879. (void *)ioc->attr->brcd_serialnum,
  1880. BFA_ADAPTER_SERIAL_NUM_LEN);
  1881. }
/* Copy the firmware version string (BFA_VERSION_LEN bytes) out of the
 * firmware attribute block into @fw_ver. */
static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memset(fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
  1888. static void
  1889. bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
  1890. {
  1891. BUG_ON(!(chip_rev));
  1892. memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
  1893. chip_rev[0] = 'R';
  1894. chip_rev[1] = 'e';
  1895. chip_rev[2] = 'v';
  1896. chip_rev[3] = '-';
  1897. chip_rev[4] = ioc->attr->asic_rev;
  1898. chip_rev[5] = '\0';
  1899. }
/* Copy the option ROM version string (BFA_VERSION_LEN bytes) out of
 * the firmware attribute block into @optrom_ver. */
static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memset(optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		BFA_VERSION_LEN);
}
  1907. static void
  1908. bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
  1909. {
  1910. memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
  1911. memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
  1912. }
  1913. static void
  1914. bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
  1915. {
  1916. struct bfi_ioc_attr *ioc_attr;
  1917. BUG_ON(!(model));
  1918. memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
  1919. ioc_attr = ioc->attr;
  1920. /**
  1921. * model name
  1922. */
  1923. snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
  1924. BFA_MFG_NAME, ioc_attr->card_type);
  1925. }
  1926. static enum bfa_ioc_state
  1927. bfa_ioc_get_state(struct bfa_ioc *ioc)
  1928. {
  1929. enum bfa_iocpf_state iocpf_st;
  1930. enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
  1931. if (ioc_st == BFA_IOC_ENABLING ||
  1932. ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
  1933. iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
  1934. switch (iocpf_st) {
  1935. case BFA_IOCPF_SEMWAIT:
  1936. ioc_st = BFA_IOC_SEMWAIT;
  1937. break;
  1938. case BFA_IOCPF_HWINIT:
  1939. ioc_st = BFA_IOC_HWINIT;
  1940. break;
  1941. case BFA_IOCPF_FWMISMATCH:
  1942. ioc_st = BFA_IOC_FWMISMATCH;
  1943. break;
  1944. case BFA_IOCPF_FAIL:
  1945. ioc_st = BFA_IOC_FAIL;
  1946. break;
  1947. case BFA_IOCPF_INITFAIL:
  1948. ioc_st = BFA_IOC_INITFAIL;
  1949. break;
  1950. default:
  1951. break;
  1952. }
  1953. }
  1954. return ioc_st;
  1955. }
  1956. void
  1957. bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
  1958. {
  1959. memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
  1960. ioc_attr->state = bfa_ioc_get_state(ioc);
  1961. ioc_attr->port_id = ioc->port_id;
  1962. ioc_attr->port_mode = ioc->port_mode;
  1963. ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
  1964. ioc_attr->cap_bm = ioc->ad_cap_bm;
  1965. ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
  1966. bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
  1967. ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
  1968. ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
  1969. bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
  1970. }
/**
 * WWN public
 */

/* Return the port world-wide name from the firmware attribute block. */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}
/* Return the adapter MAC address from the firmware attribute block. */
mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}
/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heart Beat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	/* IOC_E_HBFAIL drives the IOC state machine into its failure path. */
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
/*
 * LL (Ethernet) IOCs carry no WWNs to validate, and no FC-specific
 * checks are implemented below the early return, so this function is
 * currently a no-op placeholder.
 */
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
/**
 * @dg hal_iocpf_pvt BFA IOC PF private functions
 * @{
 */

/* Forward an enable request to the IOCPF state machine. */
static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
/* Forward a disable request to the IOCPF state machine. */
static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}
/* Report an IOC failure to the IOCPF state machine. */
static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}
/* Report an initialization failure to the IOCPF state machine. */
static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}
/* Report a get-attributes failure to the IOCPF state machine. */
static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}
/* Forward a stop request to the IOCPF state machine. */
static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}
  2035. void
  2036. bfa_nw_iocpf_timeout(void *ioc_arg)
  2037. {
  2038. struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
  2039. enum bfa_iocpf_state iocpf_st;
  2040. iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
  2041. if (iocpf_st == BFA_IOCPF_HWINIT)
  2042. bfa_ioc_poll_fwinit(ioc);
  2043. else
  2044. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
  2045. }
  2046. void
  2047. bfa_nw_iocpf_sem_timeout(void *ioc_arg)
  2048. {
  2049. struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
  2050. bfa_ioc_hw_sem_get(ioc);
  2051. }
  2052. static void
  2053. bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
  2054. {
  2055. u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
  2056. if (fwstate == BFI_IOC_DISABLED) {
  2057. bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
  2058. return;
  2059. }
  2060. if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
  2061. bfa_nw_iocpf_timeout(ioc);
  2062. } else {
  2063. ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
  2064. mod_timer(&ioc->iocpf_timer, jiffies +
  2065. msecs_to_jiffies(BFA_IOC_POLL_TOV));
  2066. }
  2067. }